1837f08fdSAnirudh Venkataramanan // SPDX-License-Identifier: GPL-2.0 2837f08fdSAnirudh Venkataramanan /* Copyright (c) 2018, Intel Corporation. */ 3837f08fdSAnirudh Venkataramanan 4837f08fdSAnirudh Venkataramanan /* Intel(R) Ethernet Connection E800 Series Linux Driver */ 5837f08fdSAnirudh Venkataramanan 6837f08fdSAnirudh Venkataramanan #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7837f08fdSAnirudh Venkataramanan 834a2a3b8SJeff Kirsher #include <generated/utsrelease.h> 9837f08fdSAnirudh Venkataramanan #include "ice.h" 10eff380aaSAnirudh Venkataramanan #include "ice_base.h" 1145d3d428SAnirudh Venkataramanan #include "ice_lib.h" 121b8f15b6SMichal Swiatkowski #include "ice_fltr.h" 1337b6f646SAnirudh Venkataramanan #include "ice_dcb_lib.h" 14b94b013eSDave Ertman #include "ice_dcb_nl.h" 151adf7eadSJacob Keller #include "ice_devlink.h" 163089cf6dSJesse Brandeburg /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the 173089cf6dSJesse Brandeburg * ice tracepoint functions. This must be done exactly once across the 183089cf6dSJesse Brandeburg * ice driver. 193089cf6dSJesse Brandeburg */ 203089cf6dSJesse Brandeburg #define CREATE_TRACE_POINTS 213089cf6dSJesse Brandeburg #include "ice_trace.h" 22b3be918dSGrzegorz Nitka #include "ice_eswitch.h" 230d08a441SKiran Patil #include "ice_tc_lib.h" 24837f08fdSAnirudh Venkataramanan 25837f08fdSAnirudh Venkataramanan #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" 26837f08fdSAnirudh Venkataramanan static const char ice_driver_string[] = DRV_SUMMARY; 27837f08fdSAnirudh Venkataramanan static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; 28837f08fdSAnirudh Venkataramanan 29462acf6aSTony Nguyen /* DDP Package file located in firmware search paths (e.g. 
/lib/firmware/) */ 30462acf6aSTony Nguyen #define ICE_DDP_PKG_PATH "intel/ice/ddp/" 31462acf6aSTony Nguyen #define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg" 32462acf6aSTony Nguyen 33837f08fdSAnirudh Venkataramanan MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 34837f08fdSAnirudh Venkataramanan MODULE_DESCRIPTION(DRV_SUMMARY); 3598674ebeSJesse Brandeburg MODULE_LICENSE("GPL v2"); 36462acf6aSTony Nguyen MODULE_FIRMWARE(ICE_DDP_PKG_FILE); 37837f08fdSAnirudh Venkataramanan 38837f08fdSAnirudh Venkataramanan static int debug = -1; 39837f08fdSAnirudh Venkataramanan module_param(debug, int, 0644); 407ec59eeaSAnirudh Venkataramanan #ifndef CONFIG_DYNAMIC_DEBUG 417ec59eeaSAnirudh Venkataramanan MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)"); 427ec59eeaSAnirudh Venkataramanan #else 437ec59eeaSAnirudh Venkataramanan MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)"); 447ec59eeaSAnirudh Venkataramanan #endif /* !CONFIG_DYNAMIC_DEBUG */ 45837f08fdSAnirudh Venkataramanan 46d25a0fc4SDave Ertman static DEFINE_IDA(ice_aux_ida); 4722bf877eSMaciej Fijalkowski DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key); 4822bf877eSMaciej Fijalkowski EXPORT_SYMBOL(ice_xdp_locking_key); 49d25a0fc4SDave Ertman 50940b61afSAnirudh Venkataramanan static struct workqueue_struct *ice_wq; 51462acf6aSTony Nguyen static const struct net_device_ops ice_netdev_safe_mode_ops; 52cdedef59SAnirudh Venkataramanan static const struct net_device_ops ice_netdev_ops; 53940b61afSAnirudh Venkataramanan 54462acf6aSTony Nguyen static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); 5528c2a645SAnirudh Venkataramanan 560f9d5027SAnirudh Venkataramanan static void ice_vsi_release_all(struct ice_pf *pf); 573a858ba3SAnirudh Venkataramanan 58fbc7b27aSKiran Patil static int ice_rebuild_channels(struct ice_pf *pf); 59fbc7b27aSKiran Patil static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr); 60fbc7b27aSKiran Patil 61195bb48fSMichal 
Swiatkowski static int 62195bb48fSMichal Swiatkowski ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, 63195bb48fSMichal Swiatkowski void *cb_priv, enum tc_setup_type type, void *type_data, 64195bb48fSMichal Swiatkowski void *data, 65195bb48fSMichal Swiatkowski void (*cleanup)(struct flow_block_cb *block_cb)); 66195bb48fSMichal Swiatkowski 67df006dd4SDave Ertman bool netif_is_ice(struct net_device *dev) 68df006dd4SDave Ertman { 69df006dd4SDave Ertman return dev && (dev->netdev_ops == &ice_netdev_ops); 70df006dd4SDave Ertman } 71df006dd4SDave Ertman 723a858ba3SAnirudh Venkataramanan /** 73b3969fd7SSudheer Mogilappagari * ice_get_tx_pending - returns number of Tx descriptors not processed 74b3969fd7SSudheer Mogilappagari * @ring: the ring of descriptors 75b3969fd7SSudheer Mogilappagari */ 76e72bba21SMaciej Fijalkowski static u16 ice_get_tx_pending(struct ice_tx_ring *ring) 77b3969fd7SSudheer Mogilappagari { 78c1ddf1f5SBrett Creeley u16 head, tail; 79b3969fd7SSudheer Mogilappagari 80b3969fd7SSudheer Mogilappagari head = ring->next_to_clean; 81c1ddf1f5SBrett Creeley tail = ring->next_to_use; 82b3969fd7SSudheer Mogilappagari 83b3969fd7SSudheer Mogilappagari if (head != tail) 84b3969fd7SSudheer Mogilappagari return (head < tail) ? 
85b3969fd7SSudheer Mogilappagari tail - head : (tail + ring->count - head); 86b3969fd7SSudheer Mogilappagari return 0; 87b3969fd7SSudheer Mogilappagari } 88b3969fd7SSudheer Mogilappagari 89b3969fd7SSudheer Mogilappagari /** 90b3969fd7SSudheer Mogilappagari * ice_check_for_hang_subtask - check for and recover hung queues 91b3969fd7SSudheer Mogilappagari * @pf: pointer to PF struct 92b3969fd7SSudheer Mogilappagari */ 93b3969fd7SSudheer Mogilappagari static void ice_check_for_hang_subtask(struct ice_pf *pf) 94b3969fd7SSudheer Mogilappagari { 95b3969fd7SSudheer Mogilappagari struct ice_vsi *vsi = NULL; 96e89e899fSBrett Creeley struct ice_hw *hw; 97b3969fd7SSudheer Mogilappagari unsigned int i; 98b3969fd7SSudheer Mogilappagari int packets; 99e89e899fSBrett Creeley u32 v; 100b3969fd7SSudheer Mogilappagari 101b3969fd7SSudheer Mogilappagari ice_for_each_vsi(pf, v) 102b3969fd7SSudheer Mogilappagari if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { 103b3969fd7SSudheer Mogilappagari vsi = pf->vsi[v]; 104b3969fd7SSudheer Mogilappagari break; 105b3969fd7SSudheer Mogilappagari } 106b3969fd7SSudheer Mogilappagari 107e97fb1aeSAnirudh Venkataramanan if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state)) 108b3969fd7SSudheer Mogilappagari return; 109b3969fd7SSudheer Mogilappagari 110b3969fd7SSudheer Mogilappagari if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) 111b3969fd7SSudheer Mogilappagari return; 112b3969fd7SSudheer Mogilappagari 113e89e899fSBrett Creeley hw = &vsi->back->hw; 114e89e899fSBrett Creeley 1152faf63b6SMaciej Fijalkowski ice_for_each_txq(vsi, i) { 116e72bba21SMaciej Fijalkowski struct ice_tx_ring *tx_ring = vsi->tx_rings[i]; 117b3969fd7SSudheer Mogilappagari 118fbc7b27aSKiran Patil if (!tx_ring) 119fbc7b27aSKiran Patil continue; 120fbc7b27aSKiran Patil if (ice_ring_ch_enabled(tx_ring)) 121fbc7b27aSKiran Patil continue; 122fbc7b27aSKiran Patil 123fbc7b27aSKiran Patil if (tx_ring->desc) { 124b3969fd7SSudheer Mogilappagari /* If packet counter has not changed the queue 
is 125b3969fd7SSudheer Mogilappagari * likely stalled, so force an interrupt for this 126b3969fd7SSudheer Mogilappagari * queue. 127b3969fd7SSudheer Mogilappagari * 128b3969fd7SSudheer Mogilappagari * prev_pkt would be negative if there was no 129b3969fd7SSudheer Mogilappagari * pending work. 130b3969fd7SSudheer Mogilappagari */ 131b3969fd7SSudheer Mogilappagari packets = tx_ring->stats.pkts & INT_MAX; 132b3969fd7SSudheer Mogilappagari if (tx_ring->tx_stats.prev_pkt == packets) { 133b3969fd7SSudheer Mogilappagari /* Trigger sw interrupt to revive the queue */ 134e89e899fSBrett Creeley ice_trigger_sw_intr(hw, tx_ring->q_vector); 135b3969fd7SSudheer Mogilappagari continue; 136b3969fd7SSudheer Mogilappagari } 137b3969fd7SSudheer Mogilappagari 138b3969fd7SSudheer Mogilappagari /* Memory barrier between read of packet count and call 139b3969fd7SSudheer Mogilappagari * to ice_get_tx_pending() 140b3969fd7SSudheer Mogilappagari */ 141b3969fd7SSudheer Mogilappagari smp_rmb(); 142b3969fd7SSudheer Mogilappagari tx_ring->tx_stats.prev_pkt = 143b3969fd7SSudheer Mogilappagari ice_get_tx_pending(tx_ring) ? packets : -1; 144b3969fd7SSudheer Mogilappagari } 145b3969fd7SSudheer Mogilappagari } 146b3969fd7SSudheer Mogilappagari } 147b3969fd7SSudheer Mogilappagari 148b3969fd7SSudheer Mogilappagari /** 149561f4379STony Nguyen * ice_init_mac_fltr - Set initial MAC filters 150561f4379STony Nguyen * @pf: board private structure 151561f4379STony Nguyen * 1522f2da36eSAnirudh Venkataramanan * Set initial set of MAC filters for PF VSI; configure filters for permanent 153561f4379STony Nguyen * address and broadcast address. If an error is encountered, netdevice will be 154561f4379STony Nguyen * unregistered. 
155561f4379STony Nguyen */ 156561f4379STony Nguyen static int ice_init_mac_fltr(struct ice_pf *pf) 157561f4379STony Nguyen { 158561f4379STony Nguyen struct ice_vsi *vsi; 1591b8f15b6SMichal Swiatkowski u8 *perm_addr; 160561f4379STony Nguyen 161208ff751SAnirudh Venkataramanan vsi = ice_get_main_vsi(pf); 162561f4379STony Nguyen if (!vsi) 163561f4379STony Nguyen return -EINVAL; 164561f4379STony Nguyen 1651b8f15b6SMichal Swiatkowski perm_addr = vsi->port_info->mac.perm_addr; 166c1484691STony Nguyen return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); 167561f4379STony Nguyen } 168561f4379STony Nguyen 169561f4379STony Nguyen /** 170f9867df6SAnirudh Venkataramanan * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced 171e94d4478SAnirudh Venkataramanan * @netdev: the net device on which the sync is happening 172f9867df6SAnirudh Venkataramanan * @addr: MAC address to sync 173e94d4478SAnirudh Venkataramanan * 174e94d4478SAnirudh Venkataramanan * This is a callback function which is called by the in kernel device sync 175e94d4478SAnirudh Venkataramanan * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only 176e94d4478SAnirudh Venkataramanan * populates the tmp_sync_list, which is later used by ice_add_mac to add the 177f9867df6SAnirudh Venkataramanan * MAC filters from the hardware. 
178e94d4478SAnirudh Venkataramanan */ 179e94d4478SAnirudh Venkataramanan static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr) 180e94d4478SAnirudh Venkataramanan { 181e94d4478SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 182e94d4478SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 183e94d4478SAnirudh Venkataramanan 1841b8f15b6SMichal Swiatkowski if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr, 1851b8f15b6SMichal Swiatkowski ICE_FWD_TO_VSI)) 186e94d4478SAnirudh Venkataramanan return -EINVAL; 187e94d4478SAnirudh Venkataramanan 188e94d4478SAnirudh Venkataramanan return 0; 189e94d4478SAnirudh Venkataramanan } 190e94d4478SAnirudh Venkataramanan 191e94d4478SAnirudh Venkataramanan /** 192f9867df6SAnirudh Venkataramanan * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced 193e94d4478SAnirudh Venkataramanan * @netdev: the net device on which the unsync is happening 194f9867df6SAnirudh Venkataramanan * @addr: MAC address to unsync 195e94d4478SAnirudh Venkataramanan * 196e94d4478SAnirudh Venkataramanan * This is a callback function which is called by the in kernel device unsync 197e94d4478SAnirudh Venkataramanan * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only 198e94d4478SAnirudh Venkataramanan * populates the tmp_unsync_list, which is later used by ice_remove_mac to 199f9867df6SAnirudh Venkataramanan * delete the MAC filters from the hardware. 
200e94d4478SAnirudh Venkataramanan */ 201e94d4478SAnirudh Venkataramanan static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) 202e94d4478SAnirudh Venkataramanan { 203e94d4478SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 204e94d4478SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 205e94d4478SAnirudh Venkataramanan 2063ba7f53fSBrett Creeley /* Under some circumstances, we might receive a request to delete our 2073ba7f53fSBrett Creeley * own device address from our uc list. Because we store the device 2083ba7f53fSBrett Creeley * address in the VSI's MAC filter list, we need to ignore such 2093ba7f53fSBrett Creeley * requests and not delete our device address from this list. 2103ba7f53fSBrett Creeley */ 2113ba7f53fSBrett Creeley if (ether_addr_equal(addr, netdev->dev_addr)) 2123ba7f53fSBrett Creeley return 0; 2133ba7f53fSBrett Creeley 2141b8f15b6SMichal Swiatkowski if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr, 2151b8f15b6SMichal Swiatkowski ICE_FWD_TO_VSI)) 216e94d4478SAnirudh Venkataramanan return -EINVAL; 217e94d4478SAnirudh Venkataramanan 218e94d4478SAnirudh Venkataramanan return 0; 219e94d4478SAnirudh Venkataramanan } 220e94d4478SAnirudh Venkataramanan 221e94d4478SAnirudh Venkataramanan /** 222e94d4478SAnirudh Venkataramanan * ice_vsi_fltr_changed - check if filter state changed 223e94d4478SAnirudh Venkataramanan * @vsi: VSI to be checked 224e94d4478SAnirudh Venkataramanan * 225e94d4478SAnirudh Venkataramanan * returns true if filter state has changed, false otherwise. 
226e94d4478SAnirudh Venkataramanan */ 227e94d4478SAnirudh Venkataramanan static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) 228e94d4478SAnirudh Venkataramanan { 229e97fb1aeSAnirudh Venkataramanan return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) || 230e97fb1aeSAnirudh Venkataramanan test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) || 231e97fb1aeSAnirudh Venkataramanan test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); 232e94d4478SAnirudh Venkataramanan } 233e94d4478SAnirudh Venkataramanan 234e94d4478SAnirudh Venkataramanan /** 235fabf480bSBrett Creeley * ice_set_promisc - Enable promiscuous mode for a given PF 2365eda8afdSAkeem G Abodunrin * @vsi: the VSI being configured 2375eda8afdSAkeem G Abodunrin * @promisc_m: mask of promiscuous config bits 2385eda8afdSAkeem G Abodunrin * 2395eda8afdSAkeem G Abodunrin */ 240fabf480bSBrett Creeley static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m) 2415eda8afdSAkeem G Abodunrin { 2425e24d598STony Nguyen int status; 2435eda8afdSAkeem G Abodunrin 2445eda8afdSAkeem G Abodunrin if (vsi->type != ICE_VSI_PF) 2455eda8afdSAkeem G Abodunrin return 0; 2465eda8afdSAkeem G Abodunrin 247fabf480bSBrett Creeley if (vsi->num_vlan > 1) 248fabf480bSBrett Creeley status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m); 2495eda8afdSAkeem G Abodunrin else 250fabf480bSBrett Creeley status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0); 251c1484691STony Nguyen return status; 2525eda8afdSAkeem G Abodunrin } 2535eda8afdSAkeem G Abodunrin 254fabf480bSBrett Creeley /** 255fabf480bSBrett Creeley * ice_clear_promisc - Disable promiscuous mode for a given PF 256fabf480bSBrett Creeley * @vsi: the VSI being configured 257fabf480bSBrett Creeley * @promisc_m: mask of promiscuous config bits 258fabf480bSBrett Creeley * 259fabf480bSBrett Creeley */ 260fabf480bSBrett Creeley static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m) 261fabf480bSBrett Creeley { 2625e24d598STony Nguyen int status; 
263fabf480bSBrett Creeley 264fabf480bSBrett Creeley if (vsi->type != ICE_VSI_PF) 265fabf480bSBrett Creeley return 0; 266fabf480bSBrett Creeley 267fabf480bSBrett Creeley if (vsi->num_vlan > 1) 268fabf480bSBrett Creeley status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m); 269fabf480bSBrett Creeley else 270fabf480bSBrett Creeley status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0); 271c1484691STony Nguyen return status; 2725eda8afdSAkeem G Abodunrin } 2735eda8afdSAkeem G Abodunrin 2745eda8afdSAkeem G Abodunrin /** 275e94d4478SAnirudh Venkataramanan * ice_vsi_sync_fltr - Update the VSI filter list to the HW 276e94d4478SAnirudh Venkataramanan * @vsi: ptr to the VSI 277e94d4478SAnirudh Venkataramanan * 278e94d4478SAnirudh Venkataramanan * Push any outstanding VSI filter changes through the AdminQ. 279e94d4478SAnirudh Venkataramanan */ 280e94d4478SAnirudh Venkataramanan static int ice_vsi_sync_fltr(struct ice_vsi *vsi) 281e94d4478SAnirudh Venkataramanan { 2829a946843SAnirudh Venkataramanan struct device *dev = ice_pf_to_dev(vsi->back); 283e94d4478SAnirudh Venkataramanan struct net_device *netdev = vsi->netdev; 284e94d4478SAnirudh Venkataramanan bool promisc_forced_on = false; 285e94d4478SAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 286e94d4478SAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 287e94d4478SAnirudh Venkataramanan u32 changed_flags = 0; 2885eda8afdSAkeem G Abodunrin u8 promisc_m; 2892ccc1c1cSTony Nguyen int err; 290e94d4478SAnirudh Venkataramanan 291e94d4478SAnirudh Venkataramanan if (!vsi->netdev) 292e94d4478SAnirudh Venkataramanan return -EINVAL; 293e94d4478SAnirudh Venkataramanan 2947e408e07SAnirudh Venkataramanan while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) 295e94d4478SAnirudh Venkataramanan usleep_range(1000, 2000); 296e94d4478SAnirudh Venkataramanan 297e94d4478SAnirudh Venkataramanan changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; 298e94d4478SAnirudh Venkataramanan 
vsi->current_netdev_flags = vsi->netdev->flags; 299e94d4478SAnirudh Venkataramanan 300e94d4478SAnirudh Venkataramanan INIT_LIST_HEAD(&vsi->tmp_sync_list); 301e94d4478SAnirudh Venkataramanan INIT_LIST_HEAD(&vsi->tmp_unsync_list); 302e94d4478SAnirudh Venkataramanan 303e94d4478SAnirudh Venkataramanan if (ice_vsi_fltr_changed(vsi)) { 304e97fb1aeSAnirudh Venkataramanan clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); 305e97fb1aeSAnirudh Venkataramanan clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); 306e97fb1aeSAnirudh Venkataramanan clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); 307e94d4478SAnirudh Venkataramanan 308e94d4478SAnirudh Venkataramanan /* grab the netdev's addr_list_lock */ 309e94d4478SAnirudh Venkataramanan netif_addr_lock_bh(netdev); 310e94d4478SAnirudh Venkataramanan __dev_uc_sync(netdev, ice_add_mac_to_sync_list, 311e94d4478SAnirudh Venkataramanan ice_add_mac_to_unsync_list); 312e94d4478SAnirudh Venkataramanan __dev_mc_sync(netdev, ice_add_mac_to_sync_list, 313e94d4478SAnirudh Venkataramanan ice_add_mac_to_unsync_list); 314e94d4478SAnirudh Venkataramanan /* our temp lists are populated. 
release lock */ 315e94d4478SAnirudh Venkataramanan netif_addr_unlock_bh(netdev); 316e94d4478SAnirudh Venkataramanan } 317e94d4478SAnirudh Venkataramanan 318f9867df6SAnirudh Venkataramanan /* Remove MAC addresses in the unsync list */ 3192ccc1c1cSTony Nguyen err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); 3201b8f15b6SMichal Swiatkowski ice_fltr_free_list(dev, &vsi->tmp_unsync_list); 3212ccc1c1cSTony Nguyen if (err) { 322e94d4478SAnirudh Venkataramanan netdev_err(netdev, "Failed to delete MAC filters\n"); 323e94d4478SAnirudh Venkataramanan /* if we failed because of alloc failures, just bail */ 3242ccc1c1cSTony Nguyen if (err == -ENOMEM) 325e94d4478SAnirudh Venkataramanan goto out; 326e94d4478SAnirudh Venkataramanan } 327e94d4478SAnirudh Venkataramanan 328f9867df6SAnirudh Venkataramanan /* Add MAC addresses in the sync list */ 3292ccc1c1cSTony Nguyen err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); 3301b8f15b6SMichal Swiatkowski ice_fltr_free_list(dev, &vsi->tmp_sync_list); 33189f3e4a5SPreethi Banala /* If filter is added successfully or already exists, do not go into 33289f3e4a5SPreethi Banala * 'if' condition and report it as error. Instead continue processing 33389f3e4a5SPreethi Banala * rest of the function. 33489f3e4a5SPreethi Banala */ 3352ccc1c1cSTony Nguyen if (err && err != -EEXIST) { 336e94d4478SAnirudh Venkataramanan netdev_err(netdev, "Failed to add MAC filters\n"); 337f9867df6SAnirudh Venkataramanan /* If there is no more space for new umac filters, VSI 338e94d4478SAnirudh Venkataramanan * should go into promiscuous mode. There should be some 339e94d4478SAnirudh Venkataramanan * space reserved for promiscuous filters. 
340e94d4478SAnirudh Venkataramanan */ 341e94d4478SAnirudh Venkataramanan if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC && 3427e408e07SAnirudh Venkataramanan !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC, 343e94d4478SAnirudh Venkataramanan vsi->state)) { 344e94d4478SAnirudh Venkataramanan promisc_forced_on = true; 34519cce2c6SAnirudh Venkataramanan netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n", 346e94d4478SAnirudh Venkataramanan vsi->vsi_num); 347e94d4478SAnirudh Venkataramanan } else { 348e94d4478SAnirudh Venkataramanan goto out; 349e94d4478SAnirudh Venkataramanan } 350e94d4478SAnirudh Venkataramanan } 3512ccc1c1cSTony Nguyen err = 0; 352e94d4478SAnirudh Venkataramanan /* check for changes in promiscuous modes */ 3535eda8afdSAkeem G Abodunrin if (changed_flags & IFF_ALLMULTI) { 3545eda8afdSAkeem G Abodunrin if (vsi->current_netdev_flags & IFF_ALLMULTI) { 355bcf68ea1SNick Nunley if (vsi->num_vlan > 1) 3565eda8afdSAkeem G Abodunrin promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; 3575eda8afdSAkeem G Abodunrin else 3585eda8afdSAkeem G Abodunrin promisc_m = ICE_MCAST_PROMISC_BITS; 3595eda8afdSAkeem G Abodunrin 360fabf480bSBrett Creeley err = ice_set_promisc(vsi, promisc_m); 3615eda8afdSAkeem G Abodunrin if (err) { 3625eda8afdSAkeem G Abodunrin netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n", 3635eda8afdSAkeem G Abodunrin vsi->vsi_num); 3645eda8afdSAkeem G Abodunrin vsi->current_netdev_flags &= ~IFF_ALLMULTI; 3655eda8afdSAkeem G Abodunrin goto out_promisc; 3665eda8afdSAkeem G Abodunrin } 36792ace482SBruce Allan } else { 36892ace482SBruce Allan /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */ 369bcf68ea1SNick Nunley if (vsi->num_vlan > 1) 3705eda8afdSAkeem G Abodunrin promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; 3715eda8afdSAkeem G Abodunrin else 3725eda8afdSAkeem G Abodunrin promisc_m = ICE_MCAST_PROMISC_BITS; 3735eda8afdSAkeem G Abodunrin 374fabf480bSBrett Creeley err = ice_clear_promisc(vsi, promisc_m); 
3755eda8afdSAkeem G Abodunrin if (err) { 3765eda8afdSAkeem G Abodunrin netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n", 3775eda8afdSAkeem G Abodunrin vsi->vsi_num); 3785eda8afdSAkeem G Abodunrin vsi->current_netdev_flags |= IFF_ALLMULTI; 3795eda8afdSAkeem G Abodunrin goto out_promisc; 3805eda8afdSAkeem G Abodunrin } 3815eda8afdSAkeem G Abodunrin } 3825eda8afdSAkeem G Abodunrin } 383e94d4478SAnirudh Venkataramanan 384e94d4478SAnirudh Venkataramanan if (((changed_flags & IFF_PROMISC) || promisc_forced_on) || 385e97fb1aeSAnirudh Venkataramanan test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) { 386e97fb1aeSAnirudh Venkataramanan clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); 387e94d4478SAnirudh Venkataramanan if (vsi->current_netdev_flags & IFF_PROMISC) { 388f9867df6SAnirudh Venkataramanan /* Apply Rx filter rule to get traffic from wire */ 389fc0f39bcSBrett Creeley if (!ice_is_dflt_vsi_in_use(pf->first_sw)) { 390fc0f39bcSBrett Creeley err = ice_set_dflt_vsi(pf->first_sw, vsi); 391fc0f39bcSBrett Creeley if (err && err != -EEXIST) { 39219cce2c6SAnirudh Venkataramanan netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n", 393fc0f39bcSBrett Creeley err, vsi->vsi_num); 394fc0f39bcSBrett Creeley vsi->current_netdev_flags &= 395fc0f39bcSBrett Creeley ~IFF_PROMISC; 396e94d4478SAnirudh Venkataramanan goto out_promisc; 397e94d4478SAnirudh Venkataramanan } 3982ccc1c1cSTony Nguyen err = 0; 39929e71f41SBrett Creeley ice_cfg_vlan_pruning(vsi, false); 400fc0f39bcSBrett Creeley } 401e94d4478SAnirudh Venkataramanan } else { 402f9867df6SAnirudh Venkataramanan /* Clear Rx filter to remove traffic from wire */ 403fc0f39bcSBrett Creeley if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) { 404fc0f39bcSBrett Creeley err = ice_clear_dflt_vsi(pf->first_sw); 405fc0f39bcSBrett Creeley if (err) { 40619cce2c6SAnirudh Venkataramanan netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n", 407fc0f39bcSBrett Creeley err, vsi->vsi_num); 408fc0f39bcSBrett Creeley 
vsi->current_netdev_flags |= 409fc0f39bcSBrett Creeley IFF_PROMISC; 410e94d4478SAnirudh Venkataramanan goto out_promisc; 411e94d4478SAnirudh Venkataramanan } 41268d210a6SNick Nunley if (vsi->num_vlan > 1) 41329e71f41SBrett Creeley ice_cfg_vlan_pruning(vsi, true); 414e94d4478SAnirudh Venkataramanan } 415e94d4478SAnirudh Venkataramanan } 416fc0f39bcSBrett Creeley } 417e94d4478SAnirudh Venkataramanan goto exit; 418e94d4478SAnirudh Venkataramanan 419e94d4478SAnirudh Venkataramanan out_promisc: 420e97fb1aeSAnirudh Venkataramanan set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); 421e94d4478SAnirudh Venkataramanan goto exit; 422e94d4478SAnirudh Venkataramanan out: 423e94d4478SAnirudh Venkataramanan /* if something went wrong then set the changed flag so we try again */ 424e97fb1aeSAnirudh Venkataramanan set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); 425e97fb1aeSAnirudh Venkataramanan set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); 426e94d4478SAnirudh Venkataramanan exit: 4277e408e07SAnirudh Venkataramanan clear_bit(ICE_CFG_BUSY, vsi->state); 428e94d4478SAnirudh Venkataramanan return err; 429e94d4478SAnirudh Venkataramanan } 430e94d4478SAnirudh Venkataramanan 431e94d4478SAnirudh Venkataramanan /** 432e94d4478SAnirudh Venkataramanan * ice_sync_fltr_subtask - Sync the VSI filter list with HW 433e94d4478SAnirudh Venkataramanan * @pf: board private structure 434e94d4478SAnirudh Venkataramanan */ 435e94d4478SAnirudh Venkataramanan static void ice_sync_fltr_subtask(struct ice_pf *pf) 436e94d4478SAnirudh Venkataramanan { 437e94d4478SAnirudh Venkataramanan int v; 438e94d4478SAnirudh Venkataramanan 439e94d4478SAnirudh Venkataramanan if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags))) 440e94d4478SAnirudh Venkataramanan return; 441e94d4478SAnirudh Venkataramanan 442e94d4478SAnirudh Venkataramanan clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); 443e94d4478SAnirudh Venkataramanan 44480ed404aSBrett Creeley ice_for_each_vsi(pf, v) 445e94d4478SAnirudh Venkataramanan if (pf->vsi[v] && 
ice_vsi_fltr_changed(pf->vsi[v]) && 446e94d4478SAnirudh Venkataramanan ice_vsi_sync_fltr(pf->vsi[v])) { 447e94d4478SAnirudh Venkataramanan /* come back and try again later */ 448e94d4478SAnirudh Venkataramanan set_bit(ICE_FLAG_FLTR_SYNC, pf->flags); 449e94d4478SAnirudh Venkataramanan break; 450e94d4478SAnirudh Venkataramanan } 451e94d4478SAnirudh Venkataramanan } 452e94d4478SAnirudh Venkataramanan 453e94d4478SAnirudh Venkataramanan /** 4547b9ffc76SAnirudh Venkataramanan * ice_pf_dis_all_vsi - Pause all VSIs on a PF 4557b9ffc76SAnirudh Venkataramanan * @pf: the PF 4567b9ffc76SAnirudh Venkataramanan * @locked: is the rtnl_lock already held 4577b9ffc76SAnirudh Venkataramanan */ 4587b9ffc76SAnirudh Venkataramanan static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) 4597b9ffc76SAnirudh Venkataramanan { 460b126bd6bSKiran Patil int node; 4617b9ffc76SAnirudh Venkataramanan int v; 4627b9ffc76SAnirudh Venkataramanan 4637b9ffc76SAnirudh Venkataramanan ice_for_each_vsi(pf, v) 4647b9ffc76SAnirudh Venkataramanan if (pf->vsi[v]) 4657b9ffc76SAnirudh Venkataramanan ice_dis_vsi(pf->vsi[v], locked); 466b126bd6bSKiran Patil 467b126bd6bSKiran Patil for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++) 468b126bd6bSKiran Patil pf->pf_agg_node[node].num_vsis = 0; 469b126bd6bSKiran Patil 470b126bd6bSKiran Patil for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++) 471b126bd6bSKiran Patil pf->vf_agg_node[node].num_vsis = 0; 4727b9ffc76SAnirudh Venkataramanan } 4737b9ffc76SAnirudh Venkataramanan 4747b9ffc76SAnirudh Venkataramanan /** 475c1e5da5dSWojciech Drewek * ice_clear_sw_switch_recipes - clear switch recipes 476c1e5da5dSWojciech Drewek * @pf: board private structure 477c1e5da5dSWojciech Drewek * 478c1e5da5dSWojciech Drewek * Mark switch recipes as not created in sw structures. There are cases where 479c1e5da5dSWojciech Drewek * rules (especially advanced rules) need to be restored, either re-read from 480c1e5da5dSWojciech Drewek * hardware or added again. 
For example after the reset. 'recp_created' flag 481c1e5da5dSWojciech Drewek * prevents from doing that and need to be cleared upfront. 482c1e5da5dSWojciech Drewek */ 483c1e5da5dSWojciech Drewek static void ice_clear_sw_switch_recipes(struct ice_pf *pf) 484c1e5da5dSWojciech Drewek { 485c1e5da5dSWojciech Drewek struct ice_sw_recipe *recp; 486c1e5da5dSWojciech Drewek u8 i; 487c1e5da5dSWojciech Drewek 488c1e5da5dSWojciech Drewek recp = pf->hw.switch_info->recp_list; 489c1e5da5dSWojciech Drewek for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) 490c1e5da5dSWojciech Drewek recp[i].recp_created = false; 491c1e5da5dSWojciech Drewek } 492c1e5da5dSWojciech Drewek 493c1e5da5dSWojciech Drewek /** 494fbc7b27aSKiran Patil * ice_prepare_for_reset - prep for reset 4950b28b702SAnirudh Venkataramanan * @pf: board private structure 496fbc7b27aSKiran Patil * @reset_type: reset type requested 4970b28b702SAnirudh Venkataramanan * 4980b28b702SAnirudh Venkataramanan * Inform or close all dependent features in prep for reset. 
4990b28b702SAnirudh Venkataramanan */ 5000b28b702SAnirudh Venkataramanan static void 501fbc7b27aSKiran Patil ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) 5020b28b702SAnirudh Venkataramanan { 5030b28b702SAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 504fbc7b27aSKiran Patil struct ice_vsi *vsi; 505c1e08830SJesse Brandeburg unsigned int i; 5060b28b702SAnirudh Venkataramanan 507fbc7b27aSKiran Patil dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type); 508fbc7b27aSKiran Patil 5095abac9d7SBrett Creeley /* already prepared for reset */ 5107e408e07SAnirudh Venkataramanan if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) 5115abac9d7SBrett Creeley return; 5125abac9d7SBrett Creeley 513f9f5301eSDave Ertman ice_unplug_aux_dev(pf); 514f9f5301eSDave Ertman 515007676b4SAnirudh Venkataramanan /* Notify VFs of impending reset */ 516007676b4SAnirudh Venkataramanan if (ice_check_sq_alive(hw, &hw->mailboxq)) 517007676b4SAnirudh Venkataramanan ice_vc_notify_reset(pf); 518007676b4SAnirudh Venkataramanan 519c7aeb4d1SAkeem G Abodunrin /* Disable VFs until reset is completed */ 520005881bcSBrett Creeley ice_for_each_vf(pf, i) 52177ca27c4SPaul Greenwalt ice_set_vf_state_qs_dis(&pf->vf[i]); 522c7aeb4d1SAkeem G Abodunrin 523c1e5da5dSWojciech Drewek if (ice_is_eswitch_mode_switchdev(pf)) { 524c1e5da5dSWojciech Drewek if (reset_type != ICE_RESET_PFR) 525c1e5da5dSWojciech Drewek ice_clear_sw_switch_recipes(pf); 526c1e5da5dSWojciech Drewek } 527c1e5da5dSWojciech Drewek 528fbc7b27aSKiran Patil /* release ADQ specific HW and SW resources */ 529fbc7b27aSKiran Patil vsi = ice_get_main_vsi(pf); 530fbc7b27aSKiran Patil if (!vsi) 531fbc7b27aSKiran Patil goto skip; 532fbc7b27aSKiran Patil 533fbc7b27aSKiran Patil /* to be on safe side, reset orig_rss_size so that normal flow 534fbc7b27aSKiran Patil * of deciding rss_size can take precedence 535fbc7b27aSKiran Patil */ 536fbc7b27aSKiran Patil vsi->orig_rss_size = 0; 537fbc7b27aSKiran Patil 538fbc7b27aSKiran Patil if 
(test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { 539fbc7b27aSKiran Patil if (reset_type == ICE_RESET_PFR) { 540fbc7b27aSKiran Patil vsi->old_ena_tc = vsi->all_enatc; 541fbc7b27aSKiran Patil vsi->old_numtc = vsi->all_numtc; 542fbc7b27aSKiran Patil } else { 543fbc7b27aSKiran Patil ice_remove_q_channels(vsi, true); 544fbc7b27aSKiran Patil 545fbc7b27aSKiran Patil /* for other reset type, do not support channel rebuild 546fbc7b27aSKiran Patil * hence reset needed info 547fbc7b27aSKiran Patil */ 548fbc7b27aSKiran Patil vsi->old_ena_tc = 0; 549fbc7b27aSKiran Patil vsi->all_enatc = 0; 550fbc7b27aSKiran Patil vsi->old_numtc = 0; 551fbc7b27aSKiran Patil vsi->all_numtc = 0; 552fbc7b27aSKiran Patil vsi->req_txq = 0; 553fbc7b27aSKiran Patil vsi->req_rxq = 0; 554fbc7b27aSKiran Patil clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); 555fbc7b27aSKiran Patil memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt)); 556fbc7b27aSKiran Patil } 557fbc7b27aSKiran Patil } 558fbc7b27aSKiran Patil skip: 559fbc7b27aSKiran Patil 560462acf6aSTony Nguyen /* clear SW filtering DB */ 561462acf6aSTony Nguyen ice_clear_hw_tbls(hw); 5620b28b702SAnirudh Venkataramanan /* disable the VSIs and their queues that are not already DOWN */ 5637b9ffc76SAnirudh Venkataramanan ice_pf_dis_all_vsi(pf, false); 5640b28b702SAnirudh Venkataramanan 56506c16d89SJacob Keller if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 56648096710SKarol Kolacinski ice_ptp_prepare_for_reset(pf); 56706c16d89SJacob Keller 568c5a2a4a3SUsha Ketineni if (hw->port_info) 569c5a2a4a3SUsha Ketineni ice_sched_clear_port(hw->port_info); 570c5a2a4a3SUsha Ketineni 5710b28b702SAnirudh Venkataramanan ice_shutdown_all_ctrlq(hw); 5720f9d5027SAnirudh Venkataramanan 5737e408e07SAnirudh Venkataramanan set_bit(ICE_PREPARED_FOR_RESET, pf->state); 5740b28b702SAnirudh Venkataramanan } 5750b28b702SAnirudh Venkataramanan 5760b28b702SAnirudh Venkataramanan /** 5770b28b702SAnirudh Venkataramanan * ice_do_reset - Initiate one of many types of resets 5780b28b702SAnirudh 
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		/* reset failed: latch the failure and clear all reset
		 * request/progress bits so waiters are released
		 */
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf, true);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf, true);
		}

		return;
	}

	/* No pending resets to finish processing.
Check for new resets */ 6717e408e07SAnirudh Venkataramanan if (test_bit(ICE_PFR_REQ, pf->state)) 6720f9d5027SAnirudh Venkataramanan reset_type = ICE_RESET_PFR; 6737e408e07SAnirudh Venkataramanan if (test_bit(ICE_CORER_REQ, pf->state)) 6740f9d5027SAnirudh Venkataramanan reset_type = ICE_RESET_CORER; 6757e408e07SAnirudh Venkataramanan if (test_bit(ICE_GLOBR_REQ, pf->state)) 6760b28b702SAnirudh Venkataramanan reset_type = ICE_RESET_GLOBR; 6770f9d5027SAnirudh Venkataramanan /* If no valid reset type requested just return */ 6780f9d5027SAnirudh Venkataramanan if (reset_type == ICE_RESET_INVAL) 6790f9d5027SAnirudh Venkataramanan return; 6800b28b702SAnirudh Venkataramanan 6810f9d5027SAnirudh Venkataramanan /* reset if not already down or busy */ 6827e408e07SAnirudh Venkataramanan if (!test_bit(ICE_DOWN, pf->state) && 6837e408e07SAnirudh Venkataramanan !test_bit(ICE_CFG_BUSY, pf->state)) { 6840b28b702SAnirudh Venkataramanan ice_do_reset(pf, reset_type); 6850b28b702SAnirudh Venkataramanan } 6860b28b702SAnirudh Venkataramanan } 6870b28b702SAnirudh Venkataramanan 6880b28b702SAnirudh Venkataramanan /** 6892e0ab37cSJesse Brandeburg * ice_print_topo_conflict - print topology conflict message 6902e0ab37cSJesse Brandeburg * @vsi: the VSI whose topology status is being checked 6912e0ab37cSJesse Brandeburg */ 6922e0ab37cSJesse Brandeburg static void ice_print_topo_conflict(struct ice_vsi *vsi) 6932e0ab37cSJesse Brandeburg { 6942e0ab37cSJesse Brandeburg switch (vsi->port_info->phy.link_info.topo_media_conflict) { 6952e0ab37cSJesse Brandeburg case ICE_AQ_LINK_TOPO_CONFLICT: 6962e0ab37cSJesse Brandeburg case ICE_AQ_LINK_MEDIA_CONFLICT: 6975878589dSPaul Greenwalt case ICE_AQ_LINK_TOPO_UNREACH_PRT: 6985878589dSPaul Greenwalt case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT: 6995878589dSPaul Greenwalt case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA: 7005c57145aSPaul Greenwalt netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. 
If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n"); 7012e0ab37cSJesse Brandeburg break; 7025878589dSPaul Greenwalt case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA: 7034fc5fbeeSAnirudh Venkataramanan if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags)) 7044fc5fbeeSAnirudh Venkataramanan netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n"); 7054fc5fbeeSAnirudh Venkataramanan else 7064fc5fbeeSAnirudh Venkataramanan netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); 7075878589dSPaul Greenwalt break; 7082e0ab37cSJesse Brandeburg default: 7092e0ab37cSJesse Brandeburg break; 7102e0ab37cSJesse Brandeburg } 7112e0ab37cSJesse Brandeburg } 7122e0ab37cSJesse Brandeburg 7132e0ab37cSJesse Brandeburg /** 714cdedef59SAnirudh Venkataramanan * ice_print_link_msg - print link up or down message 715cdedef59SAnirudh Venkataramanan * @vsi: the VSI whose link status is being queried 716cdedef59SAnirudh Venkataramanan * @isup: boolean for if the link is now up or down 717cdedef59SAnirudh Venkataramanan */ 718fcea6f3dSAnirudh Venkataramanan void ice_print_link_msg(struct ice_vsi *vsi, bool isup) 719cdedef59SAnirudh Venkataramanan { 720f776b3acSPaul Greenwalt struct ice_aqc_get_phy_caps_data *caps; 7215ee30564SPaul Greenwalt const char *an_advertised; 722f776b3acSPaul Greenwalt const char *fec_req; 723cdedef59SAnirudh Venkataramanan const char *speed; 724f776b3acSPaul Greenwalt const char *fec; 725cdedef59SAnirudh Venkataramanan const char *fc; 72643260988SJesse Brandeburg const char *an; 7275518ac2aSTony Nguyen int status; 728cdedef59SAnirudh Venkataramanan 729c2a23e00SBrett Creeley if (!vsi) 730c2a23e00SBrett Creeley return; 731c2a23e00SBrett Creeley 732cdedef59SAnirudh 
	/* no state change since the last message: avoid duplicate logs */
	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	/* map AQ link speed code to a printable string */
	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	/* current flow-control mode */
	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		/* allocation failure is non-fatal: report unknowns */
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
ice_print_topo_conflict(vsi); 846cdedef59SAnirudh Venkataramanan } 847cdedef59SAnirudh Venkataramanan 848cdedef59SAnirudh Venkataramanan /** 849f9867df6SAnirudh Venkataramanan * ice_vsi_link_event - update the VSI's netdev 850f9867df6SAnirudh Venkataramanan * @vsi: the VSI on which the link event occurred 851f9867df6SAnirudh Venkataramanan * @link_up: whether or not the VSI needs to be set up or down 8520b28b702SAnirudh Venkataramanan */ 8530b28b702SAnirudh Venkataramanan static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) 8540b28b702SAnirudh Venkataramanan { 855c2a23e00SBrett Creeley if (!vsi) 856c2a23e00SBrett Creeley return; 857c2a23e00SBrett Creeley 858e97fb1aeSAnirudh Venkataramanan if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev) 8590b28b702SAnirudh Venkataramanan return; 8600b28b702SAnirudh Venkataramanan 8610b28b702SAnirudh Venkataramanan if (vsi->type == ICE_VSI_PF) { 862c2a23e00SBrett Creeley if (link_up == netif_carrier_ok(vsi->netdev)) 8630b28b702SAnirudh Venkataramanan return; 864c2a23e00SBrett Creeley 8650b28b702SAnirudh Venkataramanan if (link_up) { 8660b28b702SAnirudh Venkataramanan netif_carrier_on(vsi->netdev); 8670b28b702SAnirudh Venkataramanan netif_tx_wake_all_queues(vsi->netdev); 8680b28b702SAnirudh Venkataramanan } else { 8690b28b702SAnirudh Venkataramanan netif_carrier_off(vsi->netdev); 8700b28b702SAnirudh Venkataramanan netif_tx_stop_all_queues(vsi->netdev); 8710b28b702SAnirudh Venkataramanan } 8720b28b702SAnirudh Venkataramanan } 8730b28b702SAnirudh Venkataramanan } 8740b28b702SAnirudh Venkataramanan 8750b28b702SAnirudh Venkataramanan /** 8767d9c9b79SDave Ertman * ice_set_dflt_mib - send a default config MIB to the FW 8777d9c9b79SDave Ertman * @pf: private PF struct 8787d9c9b79SDave Ertman * 8797d9c9b79SDave Ertman * This function sends a default configuration MIB to the FW. 
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		/* best-effort: a missing default MIB is not fatal (see kdoc) */
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	/* advance past this TLV's header + info to the next TLV slot */
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV; same typelen as ETS CFG is reused */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
9367d9c9b79SDave Ertman * Octets 13 - 20 are TSA value - leave as zeros 9377d9c9b79SDave Ertman */ 9387d9c9b79SDave Ertman buf[5] = 0x64; 9397d9c9b79SDave Ertman offset += len + 2; 9407d9c9b79SDave Ertman tlv = (struct ice_lldp_org_tlv *) 9417d9c9b79SDave Ertman ((char *)tlv + sizeof(tlv->typelen) + len); 9427d9c9b79SDave Ertman 9437d9c9b79SDave Ertman /* Add PFC CFG TLV */ 9447d9c9b79SDave Ertman typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | 9457d9c9b79SDave Ertman ICE_IEEE_PFC_TLV_LEN); 9467d9c9b79SDave Ertman tlv->typelen = htons(typelen); 9477d9c9b79SDave Ertman 9487d9c9b79SDave Ertman ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 9497d9c9b79SDave Ertman ICE_IEEE_SUBTYPE_PFC_CFG); 9507d9c9b79SDave Ertman tlv->ouisubtype = htonl(ouisubtype); 9517d9c9b79SDave Ertman 9527d9c9b79SDave Ertman /* Octet 1 left as all zeros - PFC disabled */ 9537d9c9b79SDave Ertman buf[0] = 0x08; 9547d9c9b79SDave Ertman len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; 9557d9c9b79SDave Ertman offset += len + 2; 9567d9c9b79SDave Ertman 9577d9c9b79SDave Ertman if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL)) 9587d9c9b79SDave Ertman dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__); 9597d9c9b79SDave Ertman 9607d9c9b79SDave Ertman kfree(lldpmib); 9617d9c9b79SDave Ertman } 9627d9c9b79SDave Ertman 9637d9c9b79SDave Ertman /** 96499d40752SBrett Creeley * ice_check_phy_fw_load - check if PHY FW load failed 96599d40752SBrett Creeley * @pf: pointer to PF struct 96699d40752SBrett Creeley * @link_cfg_err: bitmap from the link info structure 96799d40752SBrett Creeley * 96899d40752SBrett Creeley * check if external PHY FW load failed and print an error message if it did 96999d40752SBrett Creeley */ 97099d40752SBrett Creeley static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err) 97199d40752SBrett Creeley { 97299d40752SBrett Creeley if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) { 97399d40752SBrett Creeley 
clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); 97499d40752SBrett Creeley return; 97599d40752SBrett Creeley } 97699d40752SBrett Creeley 97799d40752SBrett Creeley if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags)) 97899d40752SBrett Creeley return; 97999d40752SBrett Creeley 98099d40752SBrett Creeley if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) { 98199d40752SBrett Creeley dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n"); 98299d40752SBrett Creeley set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); 98399d40752SBrett Creeley } 98499d40752SBrett Creeley } 98599d40752SBrett Creeley 98699d40752SBrett Creeley /** 987c77849f5SAnirudh Venkataramanan * ice_check_module_power 988c77849f5SAnirudh Venkataramanan * @pf: pointer to PF struct 989c77849f5SAnirudh Venkataramanan * @link_cfg_err: bitmap from the link info structure 990c77849f5SAnirudh Venkataramanan * 991c77849f5SAnirudh Venkataramanan * check module power level returned by a previous call to aq_get_link_info 992c77849f5SAnirudh Venkataramanan * and print error messages if module power level is not supported 993c77849f5SAnirudh Venkataramanan */ 994c77849f5SAnirudh Venkataramanan static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err) 995c77849f5SAnirudh Venkataramanan { 996c77849f5SAnirudh Venkataramanan /* if module power level is supported, clear the flag */ 997c77849f5SAnirudh Venkataramanan if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT | 998c77849f5SAnirudh Venkataramanan ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) { 999c77849f5SAnirudh Venkataramanan clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); 1000c77849f5SAnirudh Venkataramanan return; 1001c77849f5SAnirudh Venkataramanan } 1002c77849f5SAnirudh Venkataramanan 1003c77849f5SAnirudh Venkataramanan /* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the 1004c77849f5SAnirudh Venkataramanan * above block 
didn't clear this bit, there's nothing to do 1005c77849f5SAnirudh Venkataramanan */ 1006c77849f5SAnirudh Venkataramanan if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags)) 1007c77849f5SAnirudh Venkataramanan return; 1008c77849f5SAnirudh Venkataramanan 1009c77849f5SAnirudh Venkataramanan if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) { 1010c77849f5SAnirudh Venkataramanan dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n"); 1011c77849f5SAnirudh Venkataramanan set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); 1012c77849f5SAnirudh Venkataramanan } else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) { 1013c77849f5SAnirudh Venkataramanan dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n"); 1014c77849f5SAnirudh Venkataramanan set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); 1015c77849f5SAnirudh Venkataramanan } 1016c77849f5SAnirudh Venkataramanan } 1017c77849f5SAnirudh Venkataramanan 1018c77849f5SAnirudh Venkataramanan /** 101999d40752SBrett Creeley * ice_check_link_cfg_err - check if link configuration failed 102099d40752SBrett Creeley * @pf: pointer to the PF struct 102199d40752SBrett Creeley * @link_cfg_err: bitmap from the link info structure 102299d40752SBrett Creeley * 102399d40752SBrett Creeley * print if any link configuration failure happens due to the value in the 102499d40752SBrett Creeley * link_cfg_err parameter in the link info structure 102599d40752SBrett Creeley */ 102699d40752SBrett Creeley static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err) 102799d40752SBrett Creeley { 102899d40752SBrett Creeley ice_check_module_power(pf, link_cfg_err); 102999d40752SBrett Creeley ice_check_phy_fw_load(pf, link_cfg_err); 103099d40752SBrett Creeley } 103199d40752SBrett Creeley 103299d40752SBrett Creeley /** 10330b28b702SAnirudh Venkataramanan * ice_link_event - process the link event 
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	/* snapshot the previous link info before refreshing it */
	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other book keeping needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	/* NOTE(review): PTP link-change notification is skipped on E810 -
	 * presumably handled via a different path there; confirm
	 */
	if (!ice_is_e810(&pf->hw))
		ice_ptp_link_change(pf, pf->hw.pf_id, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		/* no DCB: push a default MIB on link up */
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi,
link_up); 11020b28b702SAnirudh Venkataramanan 110353b8decbSAnirudh Venkataramanan ice_vc_notify_link_state(pf); 110453b8decbSAnirudh Venkataramanan 1105d348d517SAnirudh Venkataramanan return 0; 11060b28b702SAnirudh Venkataramanan } 11070b28b702SAnirudh Venkataramanan 11080b28b702SAnirudh Venkataramanan /** 11094f4be03bSAnirudh Venkataramanan * ice_watchdog_subtask - periodic tasks not using event driven scheduling 11104f4be03bSAnirudh Venkataramanan * @pf: board private structure 11110b28b702SAnirudh Venkataramanan */ 11124f4be03bSAnirudh Venkataramanan static void ice_watchdog_subtask(struct ice_pf *pf) 11130b28b702SAnirudh Venkataramanan { 11144f4be03bSAnirudh Venkataramanan int i; 11150b28b702SAnirudh Venkataramanan 11164f4be03bSAnirudh Venkataramanan /* if interface is down do nothing */ 11177e408e07SAnirudh Venkataramanan if (test_bit(ICE_DOWN, pf->state) || 11187e408e07SAnirudh Venkataramanan test_bit(ICE_CFG_BUSY, pf->state)) 11194f4be03bSAnirudh Venkataramanan return; 11200b28b702SAnirudh Venkataramanan 11214f4be03bSAnirudh Venkataramanan /* make sure we don't do these things too often */ 11224f4be03bSAnirudh Venkataramanan if (time_before(jiffies, 11234f4be03bSAnirudh Venkataramanan pf->serv_tmr_prev + pf->serv_tmr_period)) 11244f4be03bSAnirudh Venkataramanan return; 11250b28b702SAnirudh Venkataramanan 11264f4be03bSAnirudh Venkataramanan pf->serv_tmr_prev = jiffies; 11274f4be03bSAnirudh Venkataramanan 11284f4be03bSAnirudh Venkataramanan /* Update the stats for active netdevs so the network stack 11294f4be03bSAnirudh Venkataramanan * can look at updated numbers whenever it cares to 11304f4be03bSAnirudh Venkataramanan */ 11314f4be03bSAnirudh Venkataramanan ice_update_pf_stats(pf); 113280ed404aSBrett Creeley ice_for_each_vsi(pf, i) 11334f4be03bSAnirudh Venkataramanan if (pf->vsi[i] && pf->vsi[i]->netdev) 11344f4be03bSAnirudh Venkataramanan ice_update_vsi_stats(pf->vsi[i]); 11350b28b702SAnirudh Venkataramanan } 11360b28b702SAnirudh Venkataramanan 
11370b28b702SAnirudh Venkataramanan /** 1138250c3b3eSBrett Creeley * ice_init_link_events - enable/initialize link events 1139250c3b3eSBrett Creeley * @pi: pointer to the port_info instance 1140250c3b3eSBrett Creeley * 1141250c3b3eSBrett Creeley * Returns -EIO on failure, 0 on success 1142250c3b3eSBrett Creeley */ 1143250c3b3eSBrett Creeley static int ice_init_link_events(struct ice_port_info *pi) 1144250c3b3eSBrett Creeley { 1145250c3b3eSBrett Creeley u16 mask; 1146250c3b3eSBrett Creeley 1147250c3b3eSBrett Creeley mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA | 114899d40752SBrett Creeley ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL | 114999d40752SBrett Creeley ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL)); 1150250c3b3eSBrett Creeley 1151250c3b3eSBrett Creeley if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) { 115219cce2c6SAnirudh Venkataramanan dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n", 1153250c3b3eSBrett Creeley pi->lport); 1154250c3b3eSBrett Creeley return -EIO; 1155250c3b3eSBrett Creeley } 1156250c3b3eSBrett Creeley 1157250c3b3eSBrett Creeley if (ice_aq_get_link_info(pi, true, NULL, NULL)) { 115819cce2c6SAnirudh Venkataramanan dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n", 1159250c3b3eSBrett Creeley pi->lport); 1160250c3b3eSBrett Creeley return -EIO; 1161250c3b3eSBrett Creeley } 1162250c3b3eSBrett Creeley 1163250c3b3eSBrett Creeley return 0; 1164250c3b3eSBrett Creeley } 1165250c3b3eSBrett Creeley 1166250c3b3eSBrett Creeley /** 1167250c3b3eSBrett Creeley * ice_handle_link_event - handle link event via ARQ 11682f2da36eSAnirudh Venkataramanan * @pf: PF that the link event is associated with 1169c2a23e00SBrett Creeley * @event: event structure containing link status info 1170250c3b3eSBrett Creeley */ 1171c2a23e00SBrett Creeley static int 1172c2a23e00SBrett Creeley ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) 1173250c3b3eSBrett Creeley { 1174c2a23e00SBrett 
Creeley struct ice_aqc_get_link_status_data *link_data; 1175250c3b3eSBrett Creeley struct ice_port_info *port_info; 1176250c3b3eSBrett Creeley int status; 1177250c3b3eSBrett Creeley 1178c2a23e00SBrett Creeley link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf; 1179250c3b3eSBrett Creeley port_info = pf->hw.port_info; 1180250c3b3eSBrett Creeley if (!port_info) 1181250c3b3eSBrett Creeley return -EINVAL; 1182250c3b3eSBrett Creeley 1183c2a23e00SBrett Creeley status = ice_link_event(pf, port_info, 1184c2a23e00SBrett Creeley !!(link_data->link_info & ICE_AQ_LINK_UP), 1185c2a23e00SBrett Creeley le16_to_cpu(link_data->link_speed)); 1186250c3b3eSBrett Creeley if (status) 118719cce2c6SAnirudh Venkataramanan dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n", 118819cce2c6SAnirudh Venkataramanan status); 1189250c3b3eSBrett Creeley 1190250c3b3eSBrett Creeley return status; 1191250c3b3eSBrett Creeley } 1192250c3b3eSBrett Creeley 1193d69ea414SJacob Keller enum ice_aq_task_state { 1194d69ea414SJacob Keller ICE_AQ_TASK_WAITING = 0, 1195d69ea414SJacob Keller ICE_AQ_TASK_COMPLETE, 1196d69ea414SJacob Keller ICE_AQ_TASK_CANCELED, 1197d69ea414SJacob Keller }; 1198d69ea414SJacob Keller 1199d69ea414SJacob Keller struct ice_aq_task { 1200d69ea414SJacob Keller struct hlist_node entry; 1201d69ea414SJacob Keller 1202d69ea414SJacob Keller u16 opcode; 1203d69ea414SJacob Keller struct ice_rq_event_info *event; 1204d69ea414SJacob Keller enum ice_aq_task_state state; 1205d69ea414SJacob Keller }; 1206d69ea414SJacob Keller 1207d69ea414SJacob Keller /** 1208ef860480STony Nguyen * ice_aq_wait_for_event - Wait for an AdminQ event from firmware 1209d69ea414SJacob Keller * @pf: pointer to the PF private structure 1210d69ea414SJacob Keller * @opcode: the opcode to wait for 1211d69ea414SJacob Keller * @timeout: how long to wait, in jiffies 1212d69ea414SJacob Keller * @event: storage for the event info 1213d69ea414SJacob Keller * 1214d69ea414SJacob Keller * Waits for a 
 * specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * To obtain only the descriptor contents, pass an event without an allocated
 * msg_buf. If the complete data buffer is desired, allocate the
 * event->msg_buf with enough space ahead of time.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_aq_task *task;
	unsigned long start;
	long ret;
	int err;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->event = event;
	task->state = ICE_AQ_TASK_WAITING;

	/* Publish the task before sleeping so ice_aq_check_events() (the
	 * control-queue clean path) can find it; aq_wait_lock orders the
	 * list update against that reader.
	 */
	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	start = jiffies;

	/* Wakes when task->state becomes non-zero (COMPLETE or CANCELED),
	 * on timeout, or on a signal (ret < 0).
	 */
	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
					       timeout);
	switch (task->state) {
	case ICE_AQ_TASK_WAITING:
		/* never woken by the event path: signal or timeout */
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		opcode);

	/* Unlink under the lock before freeing so the event-matching path
	 * can never dereference a stale task.
	 */
	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);
	kfree(task);

	return err;
}

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
1288d69ea414SJacob Keller * 1289d69ea414SJacob Keller * If multiple threads wait for the same opcode, they will all be woken up. 1290d69ea414SJacob Keller * 1291d69ea414SJacob Keller * Note that event->msg_buf will only be duplicated if the event has a buffer 1292d69ea414SJacob Keller * with enough space already allocated. Otherwise, only the descriptor and 1293d69ea414SJacob Keller * message length will be copied. 1294d69ea414SJacob Keller * 1295d69ea414SJacob Keller * Returns: true if an event was found, false otherwise 1296d69ea414SJacob Keller */ 1297d69ea414SJacob Keller static void ice_aq_check_events(struct ice_pf *pf, u16 opcode, 1298d69ea414SJacob Keller struct ice_rq_event_info *event) 1299d69ea414SJacob Keller { 1300d69ea414SJacob Keller struct ice_aq_task *task; 1301d69ea414SJacob Keller bool found = false; 1302d69ea414SJacob Keller 1303d69ea414SJacob Keller spin_lock_bh(&pf->aq_wait_lock); 1304d69ea414SJacob Keller hlist_for_each_entry(task, &pf->aq_wait_list, entry) { 1305d69ea414SJacob Keller if (task->state || task->opcode != opcode) 1306d69ea414SJacob Keller continue; 1307d69ea414SJacob Keller 1308d69ea414SJacob Keller memcpy(&task->event->desc, &event->desc, sizeof(event->desc)); 1309d69ea414SJacob Keller task->event->msg_len = event->msg_len; 1310d69ea414SJacob Keller 1311d69ea414SJacob Keller /* Only copy the data buffer if a destination was set */ 1312d69ea414SJacob Keller if (task->event->msg_buf && 1313d69ea414SJacob Keller task->event->buf_len > event->buf_len) { 1314d69ea414SJacob Keller memcpy(task->event->msg_buf, event->msg_buf, 1315d69ea414SJacob Keller event->buf_len); 1316d69ea414SJacob Keller task->event->buf_len = event->buf_len; 1317d69ea414SJacob Keller } 1318d69ea414SJacob Keller 1319d69ea414SJacob Keller task->state = ICE_AQ_TASK_COMPLETE; 1320d69ea414SJacob Keller found = true; 1321d69ea414SJacob Keller } 1322d69ea414SJacob Keller spin_unlock_bh(&pf->aq_wait_lock); 1323d69ea414SJacob Keller 1324d69ea414SJacob Keller if (found) 
1325d69ea414SJacob Keller wake_up(&pf->aq_wait_queue); 1326d69ea414SJacob Keller } 1327d69ea414SJacob Keller 1328d69ea414SJacob Keller /** 1329d69ea414SJacob Keller * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks 1330d69ea414SJacob Keller * @pf: the PF private structure 1331d69ea414SJacob Keller * 1332d69ea414SJacob Keller * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads. 1333d69ea414SJacob Keller * This will then cause ice_aq_wait_for_event to exit with -ECANCELED. 1334d69ea414SJacob Keller */ 1335d69ea414SJacob Keller static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf) 1336d69ea414SJacob Keller { 1337d69ea414SJacob Keller struct ice_aq_task *task; 1338d69ea414SJacob Keller 1339d69ea414SJacob Keller spin_lock_bh(&pf->aq_wait_lock); 1340d69ea414SJacob Keller hlist_for_each_entry(task, &pf->aq_wait_list, entry) 1341d69ea414SJacob Keller task->state = ICE_AQ_TASK_CANCELED; 1342d69ea414SJacob Keller spin_unlock_bh(&pf->aq_wait_lock); 1343d69ea414SJacob Keller 1344d69ea414SJacob Keller wake_up(&pf->aq_wait_queue); 1345d69ea414SJacob Keller } 1346d69ea414SJacob Keller 1347250c3b3eSBrett Creeley /** 1348940b61afSAnirudh Venkataramanan * __ice_clean_ctrlq - helper function to clean controlq rings 1349940b61afSAnirudh Venkataramanan * @pf: ptr to struct ice_pf 1350940b61afSAnirudh Venkataramanan * @q_type: specific Control queue type 1351940b61afSAnirudh Venkataramanan */ 1352940b61afSAnirudh Venkataramanan static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) 1353940b61afSAnirudh Venkataramanan { 13544015d11eSBrett Creeley struct device *dev = ice_pf_to_dev(pf); 1355940b61afSAnirudh Venkataramanan struct ice_rq_event_info event; 1356940b61afSAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 1357940b61afSAnirudh Venkataramanan struct ice_ctl_q_info *cq; 1358940b61afSAnirudh Venkataramanan u16 pending, i = 0; 1359940b61afSAnirudh Venkataramanan const char *qtype; 1360940b61afSAnirudh Venkataramanan u32 
oldval, val; 1361940b61afSAnirudh Venkataramanan 13620b28b702SAnirudh Venkataramanan /* Do not clean control queue if/when PF reset fails */ 13637e408e07SAnirudh Venkataramanan if (test_bit(ICE_RESET_FAILED, pf->state)) 13640b28b702SAnirudh Venkataramanan return 0; 13650b28b702SAnirudh Venkataramanan 1366940b61afSAnirudh Venkataramanan switch (q_type) { 1367940b61afSAnirudh Venkataramanan case ICE_CTL_Q_ADMIN: 1368940b61afSAnirudh Venkataramanan cq = &hw->adminq; 1369940b61afSAnirudh Venkataramanan qtype = "Admin"; 1370940b61afSAnirudh Venkataramanan break; 13718f5ee3c4SJacob Keller case ICE_CTL_Q_SB: 13728f5ee3c4SJacob Keller cq = &hw->sbq; 13738f5ee3c4SJacob Keller qtype = "Sideband"; 13748f5ee3c4SJacob Keller break; 137575d2b253SAnirudh Venkataramanan case ICE_CTL_Q_MAILBOX: 137675d2b253SAnirudh Venkataramanan cq = &hw->mailboxq; 137775d2b253SAnirudh Venkataramanan qtype = "Mailbox"; 13780891c896SVignesh Sridhar /* we are going to try to detect a malicious VF, so set the 13790891c896SVignesh Sridhar * state to begin detection 13800891c896SVignesh Sridhar */ 13810891c896SVignesh Sridhar hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; 138275d2b253SAnirudh Venkataramanan break; 1383940b61afSAnirudh Venkataramanan default: 13844015d11eSBrett Creeley dev_warn(dev, "Unknown control queue type 0x%x\n", q_type); 1385940b61afSAnirudh Venkataramanan return 0; 1386940b61afSAnirudh Venkataramanan } 1387940b61afSAnirudh Venkataramanan 1388940b61afSAnirudh Venkataramanan /* check for error indications - PF_xx_AxQLEN register layout for 1389940b61afSAnirudh Venkataramanan * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN. 
1390940b61afSAnirudh Venkataramanan */ 1391940b61afSAnirudh Venkataramanan val = rd32(hw, cq->rq.len); 1392940b61afSAnirudh Venkataramanan if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | 1393940b61afSAnirudh Venkataramanan PF_FW_ARQLEN_ARQCRIT_M)) { 1394940b61afSAnirudh Venkataramanan oldval = val; 1395940b61afSAnirudh Venkataramanan if (val & PF_FW_ARQLEN_ARQVFE_M) 13964015d11eSBrett Creeley dev_dbg(dev, "%s Receive Queue VF Error detected\n", 13974015d11eSBrett Creeley qtype); 1398940b61afSAnirudh Venkataramanan if (val & PF_FW_ARQLEN_ARQOVFL_M) { 139919cce2c6SAnirudh Venkataramanan dev_dbg(dev, "%s Receive Queue Overflow Error detected\n", 1400940b61afSAnirudh Venkataramanan qtype); 1401940b61afSAnirudh Venkataramanan } 1402940b61afSAnirudh Venkataramanan if (val & PF_FW_ARQLEN_ARQCRIT_M) 140319cce2c6SAnirudh Venkataramanan dev_dbg(dev, "%s Receive Queue Critical Error detected\n", 1404940b61afSAnirudh Venkataramanan qtype); 1405940b61afSAnirudh Venkataramanan val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | 1406940b61afSAnirudh Venkataramanan PF_FW_ARQLEN_ARQCRIT_M); 1407940b61afSAnirudh Venkataramanan if (oldval != val) 1408940b61afSAnirudh Venkataramanan wr32(hw, cq->rq.len, val); 1409940b61afSAnirudh Venkataramanan } 1410940b61afSAnirudh Venkataramanan 1411940b61afSAnirudh Venkataramanan val = rd32(hw, cq->sq.len); 1412940b61afSAnirudh Venkataramanan if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | 1413940b61afSAnirudh Venkataramanan PF_FW_ATQLEN_ATQCRIT_M)) { 1414940b61afSAnirudh Venkataramanan oldval = val; 1415940b61afSAnirudh Venkataramanan if (val & PF_FW_ATQLEN_ATQVFE_M) 141619cce2c6SAnirudh Venkataramanan dev_dbg(dev, "%s Send Queue VF Error detected\n", 141719cce2c6SAnirudh Venkataramanan qtype); 1418940b61afSAnirudh Venkataramanan if (val & PF_FW_ATQLEN_ATQOVFL_M) { 14194015d11eSBrett Creeley dev_dbg(dev, "%s Send Queue Overflow Error detected\n", 1420940b61afSAnirudh Venkataramanan qtype); 1421940b61afSAnirudh 
Venkataramanan } 1422940b61afSAnirudh Venkataramanan if (val & PF_FW_ATQLEN_ATQCRIT_M) 14234015d11eSBrett Creeley dev_dbg(dev, "%s Send Queue Critical Error detected\n", 1424940b61afSAnirudh Venkataramanan qtype); 1425940b61afSAnirudh Venkataramanan val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | 1426940b61afSAnirudh Venkataramanan PF_FW_ATQLEN_ATQCRIT_M); 1427940b61afSAnirudh Venkataramanan if (oldval != val) 1428940b61afSAnirudh Venkataramanan wr32(hw, cq->sq.len, val); 1429940b61afSAnirudh Venkataramanan } 1430940b61afSAnirudh Venkataramanan 1431940b61afSAnirudh Venkataramanan event.buf_len = cq->rq_buf_size; 14329efe35d0STony Nguyen event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 1433940b61afSAnirudh Venkataramanan if (!event.msg_buf) 1434940b61afSAnirudh Venkataramanan return 0; 1435940b61afSAnirudh Venkataramanan 1436940b61afSAnirudh Venkataramanan do { 14370b28b702SAnirudh Venkataramanan u16 opcode; 14385518ac2aSTony Nguyen int ret; 1439940b61afSAnirudh Venkataramanan 1440940b61afSAnirudh Venkataramanan ret = ice_clean_rq_elem(hw, cq, &event, &pending); 1441d54699e2STony Nguyen if (ret == -EALREADY) 1442940b61afSAnirudh Venkataramanan break; 1443940b61afSAnirudh Venkataramanan if (ret) { 14445f87ec48STony Nguyen dev_err(dev, "%s Receive Queue event error %d\n", qtype, 14455f87ec48STony Nguyen ret); 1446940b61afSAnirudh Venkataramanan break; 1447940b61afSAnirudh Venkataramanan } 14480b28b702SAnirudh Venkataramanan 14490b28b702SAnirudh Venkataramanan opcode = le16_to_cpu(event.desc.opcode); 14500b28b702SAnirudh Venkataramanan 1451d69ea414SJacob Keller /* Notify any thread that might be waiting for this event */ 1452d69ea414SJacob Keller ice_aq_check_events(pf, opcode, &event); 1453d69ea414SJacob Keller 14540b28b702SAnirudh Venkataramanan switch (opcode) { 1455250c3b3eSBrett Creeley case ice_aqc_opc_get_link_status: 1456c2a23e00SBrett Creeley if (ice_handle_link_event(pf, &event)) 14574015d11eSBrett Creeley dev_err(dev, "Could not handle link 
event\n"); 1458250c3b3eSBrett Creeley break; 14592309ae38SBrett Creeley case ice_aqc_opc_event_lan_overflow: 14602309ae38SBrett Creeley ice_vf_lan_overflow_event(pf, &event); 14612309ae38SBrett Creeley break; 14621071a835SAnirudh Venkataramanan case ice_mbx_opc_send_msg_to_pf: 14630891c896SVignesh Sridhar if (!ice_is_malicious_vf(pf, &event, i, pending)) 14641071a835SAnirudh Venkataramanan ice_vc_process_vf_msg(pf, &event); 14651071a835SAnirudh Venkataramanan break; 14668b97ceb1SHieu Tran case ice_aqc_opc_fw_logging: 14678b97ceb1SHieu Tran ice_output_fw_log(hw, &event.desc, event.msg_buf); 14688b97ceb1SHieu Tran break; 146900cc3f1bSAnirudh Venkataramanan case ice_aqc_opc_lldp_set_mib_change: 147000cc3f1bSAnirudh Venkataramanan ice_dcb_process_lldp_set_mib_change(pf, &event); 147100cc3f1bSAnirudh Venkataramanan break; 14720b28b702SAnirudh Venkataramanan default: 147319cce2c6SAnirudh Venkataramanan dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n", 14740b28b702SAnirudh Venkataramanan qtype, opcode); 14750b28b702SAnirudh Venkataramanan break; 14760b28b702SAnirudh Venkataramanan } 1477940b61afSAnirudh Venkataramanan } while (pending && (i++ < ICE_DFLT_IRQ_WORK)); 1478940b61afSAnirudh Venkataramanan 14799efe35d0STony Nguyen kfree(event.msg_buf); 1480940b61afSAnirudh Venkataramanan 1481940b61afSAnirudh Venkataramanan return pending && (i == ICE_DFLT_IRQ_WORK); 1482940b61afSAnirudh Venkataramanan } 1483940b61afSAnirudh Venkataramanan 1484940b61afSAnirudh Venkataramanan /** 14853d6b640eSAnirudh Venkataramanan * ice_ctrlq_pending - check if there is a difference between ntc and ntu 14863d6b640eSAnirudh Venkataramanan * @hw: pointer to hardware info 14873d6b640eSAnirudh Venkataramanan * @cq: control queue information 14883d6b640eSAnirudh Venkataramanan * 14893d6b640eSAnirudh Venkataramanan * returns true if there are pending messages in a queue, false if there aren't 14903d6b640eSAnirudh Venkataramanan */ 14913d6b640eSAnirudh Venkataramanan static bool 
ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq) 14923d6b640eSAnirudh Venkataramanan { 14933d6b640eSAnirudh Venkataramanan u16 ntu; 14943d6b640eSAnirudh Venkataramanan 14953d6b640eSAnirudh Venkataramanan ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); 14963d6b640eSAnirudh Venkataramanan return cq->rq.next_to_clean != ntu; 14973d6b640eSAnirudh Venkataramanan } 14983d6b640eSAnirudh Venkataramanan 14993d6b640eSAnirudh Venkataramanan /** 1500940b61afSAnirudh Venkataramanan * ice_clean_adminq_subtask - clean the AdminQ rings 1501940b61afSAnirudh Venkataramanan * @pf: board private structure 1502940b61afSAnirudh Venkataramanan */ 1503940b61afSAnirudh Venkataramanan static void ice_clean_adminq_subtask(struct ice_pf *pf) 1504940b61afSAnirudh Venkataramanan { 1505940b61afSAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 1506940b61afSAnirudh Venkataramanan 15077e408e07SAnirudh Venkataramanan if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) 1508940b61afSAnirudh Venkataramanan return; 1509940b61afSAnirudh Venkataramanan 1510940b61afSAnirudh Venkataramanan if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN)) 1511940b61afSAnirudh Venkataramanan return; 1512940b61afSAnirudh Venkataramanan 15137e408e07SAnirudh Venkataramanan clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); 1514940b61afSAnirudh Venkataramanan 15153d6b640eSAnirudh Venkataramanan /* There might be a situation where new messages arrive to a control 15163d6b640eSAnirudh Venkataramanan * queue between processing the last message and clearing the 15173d6b640eSAnirudh Venkataramanan * EVENT_PENDING bit. So before exiting, check queue head again (using 15183d6b640eSAnirudh Venkataramanan * ice_ctrlq_pending) and process new messages if any. 
15193d6b640eSAnirudh Venkataramanan */ 15203d6b640eSAnirudh Venkataramanan if (ice_ctrlq_pending(hw, &hw->adminq)) 15213d6b640eSAnirudh Venkataramanan __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN); 1522940b61afSAnirudh Venkataramanan 1523940b61afSAnirudh Venkataramanan ice_flush(hw); 1524940b61afSAnirudh Venkataramanan } 1525940b61afSAnirudh Venkataramanan 1526940b61afSAnirudh Venkataramanan /** 152775d2b253SAnirudh Venkataramanan * ice_clean_mailboxq_subtask - clean the MailboxQ rings 152875d2b253SAnirudh Venkataramanan * @pf: board private structure 152975d2b253SAnirudh Venkataramanan */ 153075d2b253SAnirudh Venkataramanan static void ice_clean_mailboxq_subtask(struct ice_pf *pf) 153175d2b253SAnirudh Venkataramanan { 153275d2b253SAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 153375d2b253SAnirudh Venkataramanan 15347e408e07SAnirudh Venkataramanan if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state)) 153575d2b253SAnirudh Venkataramanan return; 153675d2b253SAnirudh Venkataramanan 153775d2b253SAnirudh Venkataramanan if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) 153875d2b253SAnirudh Venkataramanan return; 153975d2b253SAnirudh Venkataramanan 15407e408e07SAnirudh Venkataramanan clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); 154175d2b253SAnirudh Venkataramanan 154275d2b253SAnirudh Venkataramanan if (ice_ctrlq_pending(hw, &hw->mailboxq)) 154375d2b253SAnirudh Venkataramanan __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); 154475d2b253SAnirudh Venkataramanan 154575d2b253SAnirudh Venkataramanan ice_flush(hw); 154675d2b253SAnirudh Venkataramanan } 154775d2b253SAnirudh Venkataramanan 154875d2b253SAnirudh Venkataramanan /** 15498f5ee3c4SJacob Keller * ice_clean_sbq_subtask - clean the Sideband Queue rings 15508f5ee3c4SJacob Keller * @pf: board private structure 15518f5ee3c4SJacob Keller */ 15528f5ee3c4SJacob Keller static void ice_clean_sbq_subtask(struct ice_pf *pf) 15538f5ee3c4SJacob Keller { 15548f5ee3c4SJacob Keller struct ice_hw *hw = &pf->hw; 15558f5ee3c4SJacob Keller 
15568f5ee3c4SJacob Keller /* Nothing to do here if sideband queue is not supported */ 15578f5ee3c4SJacob Keller if (!ice_is_sbq_supported(hw)) { 15588f5ee3c4SJacob Keller clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); 15598f5ee3c4SJacob Keller return; 15608f5ee3c4SJacob Keller } 15618f5ee3c4SJacob Keller 15628f5ee3c4SJacob Keller if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state)) 15638f5ee3c4SJacob Keller return; 15648f5ee3c4SJacob Keller 15658f5ee3c4SJacob Keller if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB)) 15668f5ee3c4SJacob Keller return; 15678f5ee3c4SJacob Keller 15688f5ee3c4SJacob Keller clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); 15698f5ee3c4SJacob Keller 15708f5ee3c4SJacob Keller if (ice_ctrlq_pending(hw, &hw->sbq)) 15718f5ee3c4SJacob Keller __ice_clean_ctrlq(pf, ICE_CTL_Q_SB); 15728f5ee3c4SJacob Keller 15738f5ee3c4SJacob Keller ice_flush(hw); 15748f5ee3c4SJacob Keller } 15758f5ee3c4SJacob Keller 15768f5ee3c4SJacob Keller /** 1577940b61afSAnirudh Venkataramanan * ice_service_task_schedule - schedule the service task to wake up 1578940b61afSAnirudh Venkataramanan * @pf: board private structure 1579940b61afSAnirudh Venkataramanan * 1580940b61afSAnirudh Venkataramanan * If not already scheduled, this puts the task into the work queue. 
1581940b61afSAnirudh Venkataramanan */ 158228bf2672SBrett Creeley void ice_service_task_schedule(struct ice_pf *pf) 1583940b61afSAnirudh Venkataramanan { 15847e408e07SAnirudh Venkataramanan if (!test_bit(ICE_SERVICE_DIS, pf->state) && 15857e408e07SAnirudh Venkataramanan !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) && 15867e408e07SAnirudh Venkataramanan !test_bit(ICE_NEEDS_RESTART, pf->state)) 1587940b61afSAnirudh Venkataramanan queue_work(ice_wq, &pf->serv_task); 1588940b61afSAnirudh Venkataramanan } 1589940b61afSAnirudh Venkataramanan 1590940b61afSAnirudh Venkataramanan /** 1591940b61afSAnirudh Venkataramanan * ice_service_task_complete - finish up the service task 1592940b61afSAnirudh Venkataramanan * @pf: board private structure 1593940b61afSAnirudh Venkataramanan */ 1594940b61afSAnirudh Venkataramanan static void ice_service_task_complete(struct ice_pf *pf) 1595940b61afSAnirudh Venkataramanan { 15967e408e07SAnirudh Venkataramanan WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state)); 1597940b61afSAnirudh Venkataramanan 1598940b61afSAnirudh Venkataramanan /* force memory (pf->state) to sync before next service task */ 1599940b61afSAnirudh Venkataramanan smp_mb__before_atomic(); 16007e408e07SAnirudh Venkataramanan clear_bit(ICE_SERVICE_SCHED, pf->state); 1601940b61afSAnirudh Venkataramanan } 1602940b61afSAnirudh Venkataramanan 1603940b61afSAnirudh Venkataramanan /** 16048d81fa55SAkeem G Abodunrin * ice_service_task_stop - stop service task and cancel works 16058d81fa55SAkeem G Abodunrin * @pf: board private structure 1606769c500dSAkeem G Abodunrin * 16077e408e07SAnirudh Venkataramanan * Return 0 if the ICE_SERVICE_DIS bit was not already set, 1608769c500dSAkeem G Abodunrin * 1 otherwise. 
16098d81fa55SAkeem G Abodunrin */ 1610769c500dSAkeem G Abodunrin static int ice_service_task_stop(struct ice_pf *pf) 16118d81fa55SAkeem G Abodunrin { 1612769c500dSAkeem G Abodunrin int ret; 1613769c500dSAkeem G Abodunrin 16147e408e07SAnirudh Venkataramanan ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state); 16158d81fa55SAkeem G Abodunrin 16168d81fa55SAkeem G Abodunrin if (pf->serv_tmr.function) 16178d81fa55SAkeem G Abodunrin del_timer_sync(&pf->serv_tmr); 16188d81fa55SAkeem G Abodunrin if (pf->serv_task.func) 16198d81fa55SAkeem G Abodunrin cancel_work_sync(&pf->serv_task); 16208d81fa55SAkeem G Abodunrin 16217e408e07SAnirudh Venkataramanan clear_bit(ICE_SERVICE_SCHED, pf->state); 1622769c500dSAkeem G Abodunrin return ret; 16238d81fa55SAkeem G Abodunrin } 16248d81fa55SAkeem G Abodunrin 16258d81fa55SAkeem G Abodunrin /** 16265995b6d0SBrett Creeley * ice_service_task_restart - restart service task and schedule works 16275995b6d0SBrett Creeley * @pf: board private structure 16285995b6d0SBrett Creeley * 16295995b6d0SBrett Creeley * This function is needed for suspend and resume works (e.g WoL scenario) 16305995b6d0SBrett Creeley */ 16315995b6d0SBrett Creeley static void ice_service_task_restart(struct ice_pf *pf) 16325995b6d0SBrett Creeley { 16337e408e07SAnirudh Venkataramanan clear_bit(ICE_SERVICE_DIS, pf->state); 16345995b6d0SBrett Creeley ice_service_task_schedule(pf); 16355995b6d0SBrett Creeley } 16365995b6d0SBrett Creeley 16375995b6d0SBrett Creeley /** 1638940b61afSAnirudh Venkataramanan * ice_service_timer - timer callback to schedule service task 1639940b61afSAnirudh Venkataramanan * @t: pointer to timer_list 1640940b61afSAnirudh Venkataramanan */ 1641940b61afSAnirudh Venkataramanan static void ice_service_timer(struct timer_list *t) 1642940b61afSAnirudh Venkataramanan { 1643940b61afSAnirudh Venkataramanan struct ice_pf *pf = from_timer(pf, t, serv_tmr); 1644940b61afSAnirudh Venkataramanan 1645940b61afSAnirudh Venkataramanan mod_timer(&pf->serv_tmr, 
round_jiffies(pf->serv_tmr_period + jiffies)); 1646940b61afSAnirudh Venkataramanan ice_service_task_schedule(pf); 1647940b61afSAnirudh Venkataramanan } 1648940b61afSAnirudh Venkataramanan 1649940b61afSAnirudh Venkataramanan /** 1650b3969fd7SSudheer Mogilappagari * ice_handle_mdd_event - handle malicious driver detect event 1651b3969fd7SSudheer Mogilappagari * @pf: pointer to the PF structure 1652b3969fd7SSudheer Mogilappagari * 16539d5c5a52SPaul Greenwalt * Called from service task. OICR interrupt handler indicates MDD event. 16549d5c5a52SPaul Greenwalt * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log 16559d5c5a52SPaul Greenwalt * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events 16569d5c5a52SPaul Greenwalt * disable the queue, the PF can be configured to reset the VF using ethtool 16579d5c5a52SPaul Greenwalt * private flag mdd-auto-reset-vf. 1658b3969fd7SSudheer Mogilappagari */ 1659b3969fd7SSudheer Mogilappagari static void ice_handle_mdd_event(struct ice_pf *pf) 1660b3969fd7SSudheer Mogilappagari { 16614015d11eSBrett Creeley struct device *dev = ice_pf_to_dev(pf); 1662b3969fd7SSudheer Mogilappagari struct ice_hw *hw = &pf->hw; 1663c1e08830SJesse Brandeburg unsigned int i; 1664b3969fd7SSudheer Mogilappagari u32 reg; 1665b3969fd7SSudheer Mogilappagari 16667e408e07SAnirudh Venkataramanan if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) { 16679d5c5a52SPaul Greenwalt /* Since the VF MDD event logging is rate limited, check if 16689d5c5a52SPaul Greenwalt * there are pending MDD events. 
16699d5c5a52SPaul Greenwalt */ 16709d5c5a52SPaul Greenwalt ice_print_vfs_mdd_events(pf); 1671b3969fd7SSudheer Mogilappagari return; 16729d5c5a52SPaul Greenwalt } 1673b3969fd7SSudheer Mogilappagari 16749d5c5a52SPaul Greenwalt /* find what triggered an MDD event */ 1675b3969fd7SSudheer Mogilappagari reg = rd32(hw, GL_MDET_TX_PQM); 1676b3969fd7SSudheer Mogilappagari if (reg & GL_MDET_TX_PQM_VALID_M) { 1677b3969fd7SSudheer Mogilappagari u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> 1678b3969fd7SSudheer Mogilappagari GL_MDET_TX_PQM_PF_NUM_S; 1679b3969fd7SSudheer Mogilappagari u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> 1680b3969fd7SSudheer Mogilappagari GL_MDET_TX_PQM_VF_NUM_S; 1681b3969fd7SSudheer Mogilappagari u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> 1682b3969fd7SSudheer Mogilappagari GL_MDET_TX_PQM_MAL_TYPE_S; 1683b3969fd7SSudheer Mogilappagari u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >> 1684b3969fd7SSudheer Mogilappagari GL_MDET_TX_PQM_QNUM_S); 1685b3969fd7SSudheer Mogilappagari 1686b3969fd7SSudheer Mogilappagari if (netif_msg_tx_err(pf)) 16874015d11eSBrett Creeley dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", 1688b3969fd7SSudheer Mogilappagari event, queue, pf_num, vf_num); 1689b3969fd7SSudheer Mogilappagari wr32(hw, GL_MDET_TX_PQM, 0xffffffff); 1690b3969fd7SSudheer Mogilappagari } 1691b3969fd7SSudheer Mogilappagari 1692b3969fd7SSudheer Mogilappagari reg = rd32(hw, GL_MDET_TX_TCLAN); 1693b3969fd7SSudheer Mogilappagari if (reg & GL_MDET_TX_TCLAN_VALID_M) { 1694b3969fd7SSudheer Mogilappagari u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> 1695b3969fd7SSudheer Mogilappagari GL_MDET_TX_TCLAN_PF_NUM_S; 1696b3969fd7SSudheer Mogilappagari u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> 1697b3969fd7SSudheer Mogilappagari GL_MDET_TX_TCLAN_VF_NUM_S; 1698b3969fd7SSudheer Mogilappagari u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> 1699b3969fd7SSudheer Mogilappagari GL_MDET_TX_TCLAN_MAL_TYPE_S; 1700b3969fd7SSudheer 
Mogilappagari u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >> 1701b3969fd7SSudheer Mogilappagari GL_MDET_TX_TCLAN_QNUM_S); 1702b3969fd7SSudheer Mogilappagari 17031d8bd992SBen Shelton if (netif_msg_tx_err(pf)) 17044015d11eSBrett Creeley dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", 1705b3969fd7SSudheer Mogilappagari event, queue, pf_num, vf_num); 1706b3969fd7SSudheer Mogilappagari wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); 1707b3969fd7SSudheer Mogilappagari } 1708b3969fd7SSudheer Mogilappagari 1709b3969fd7SSudheer Mogilappagari reg = rd32(hw, GL_MDET_RX); 1710b3969fd7SSudheer Mogilappagari if (reg & GL_MDET_RX_VALID_M) { 1711b3969fd7SSudheer Mogilappagari u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> 1712b3969fd7SSudheer Mogilappagari GL_MDET_RX_PF_NUM_S; 1713b3969fd7SSudheer Mogilappagari u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> 1714b3969fd7SSudheer Mogilappagari GL_MDET_RX_VF_NUM_S; 1715b3969fd7SSudheer Mogilappagari u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> 1716b3969fd7SSudheer Mogilappagari GL_MDET_RX_MAL_TYPE_S; 1717b3969fd7SSudheer Mogilappagari u16 queue = ((reg & GL_MDET_RX_QNUM_M) >> 1718b3969fd7SSudheer Mogilappagari GL_MDET_RX_QNUM_S); 1719b3969fd7SSudheer Mogilappagari 1720b3969fd7SSudheer Mogilappagari if (netif_msg_rx_err(pf)) 17214015d11eSBrett Creeley dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", 1722b3969fd7SSudheer Mogilappagari event, queue, pf_num, vf_num); 1723b3969fd7SSudheer Mogilappagari wr32(hw, GL_MDET_RX, 0xffffffff); 1724b3969fd7SSudheer Mogilappagari } 1725b3969fd7SSudheer Mogilappagari 17269d5c5a52SPaul Greenwalt /* check to see if this PF caused an MDD event */ 1727b3969fd7SSudheer Mogilappagari reg = rd32(hw, PF_MDET_TX_PQM); 1728b3969fd7SSudheer Mogilappagari if (reg & PF_MDET_TX_PQM_VALID_M) { 1729b3969fd7SSudheer Mogilappagari wr32(hw, PF_MDET_TX_PQM, 0xFFFF); 17309d5c5a52SPaul Greenwalt if (netif_msg_tx_err(pf)) 17319d5c5a52SPaul Greenwalt dev_info(dev, 
"Malicious Driver Detection event TX_PQM detected on PF\n"); 1732b3969fd7SSudheer Mogilappagari } 1733b3969fd7SSudheer Mogilappagari 1734b3969fd7SSudheer Mogilappagari reg = rd32(hw, PF_MDET_TX_TCLAN); 1735b3969fd7SSudheer Mogilappagari if (reg & PF_MDET_TX_TCLAN_VALID_M) { 1736b3969fd7SSudheer Mogilappagari wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); 17379d5c5a52SPaul Greenwalt if (netif_msg_tx_err(pf)) 17389d5c5a52SPaul Greenwalt dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n"); 1739b3969fd7SSudheer Mogilappagari } 1740b3969fd7SSudheer Mogilappagari 1741b3969fd7SSudheer Mogilappagari reg = rd32(hw, PF_MDET_RX); 1742b3969fd7SSudheer Mogilappagari if (reg & PF_MDET_RX_VALID_M) { 1743b3969fd7SSudheer Mogilappagari wr32(hw, PF_MDET_RX, 0xFFFF); 17449d5c5a52SPaul Greenwalt if (netif_msg_rx_err(pf)) 17459d5c5a52SPaul Greenwalt dev_info(dev, "Malicious Driver Detection event RX detected on PF\n"); 1746b3969fd7SSudheer Mogilappagari } 1747b3969fd7SSudheer Mogilappagari 17489d5c5a52SPaul Greenwalt /* Check to see if one of the VFs caused an MDD event, and then 17499d5c5a52SPaul Greenwalt * increment counters and set print pending 17509d5c5a52SPaul Greenwalt */ 1751005881bcSBrett Creeley ice_for_each_vf(pf, i) { 17527c4bc1f5SAnirudh Venkataramanan struct ice_vf *vf = &pf->vf[i]; 17537c4bc1f5SAnirudh Venkataramanan 17547c4bc1f5SAnirudh Venkataramanan reg = rd32(hw, VP_MDET_TX_PQM(i)); 17557c4bc1f5SAnirudh Venkataramanan if (reg & VP_MDET_TX_PQM_VALID_M) { 17567c4bc1f5SAnirudh Venkataramanan wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); 17579d5c5a52SPaul Greenwalt vf->mdd_tx_events.count++; 17587e408e07SAnirudh Venkataramanan set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 17599d5c5a52SPaul Greenwalt if (netif_msg_tx_err(pf)) 17609d5c5a52SPaul Greenwalt dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n", 17617c4bc1f5SAnirudh Venkataramanan i); 17627c4bc1f5SAnirudh Venkataramanan } 17637c4bc1f5SAnirudh Venkataramanan 17647c4bc1f5SAnirudh 
Venkataramanan reg = rd32(hw, VP_MDET_TX_TCLAN(i)); 17657c4bc1f5SAnirudh Venkataramanan if (reg & VP_MDET_TX_TCLAN_VALID_M) { 17667c4bc1f5SAnirudh Venkataramanan wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); 17679d5c5a52SPaul Greenwalt vf->mdd_tx_events.count++; 17687e408e07SAnirudh Venkataramanan set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 17699d5c5a52SPaul Greenwalt if (netif_msg_tx_err(pf)) 17709d5c5a52SPaul Greenwalt dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", 17717c4bc1f5SAnirudh Venkataramanan i); 17727c4bc1f5SAnirudh Venkataramanan } 17737c4bc1f5SAnirudh Venkataramanan 17747c4bc1f5SAnirudh Venkataramanan reg = rd32(hw, VP_MDET_TX_TDPU(i)); 17757c4bc1f5SAnirudh Venkataramanan if (reg & VP_MDET_TX_TDPU_VALID_M) { 17767c4bc1f5SAnirudh Venkataramanan wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); 17779d5c5a52SPaul Greenwalt vf->mdd_tx_events.count++; 17787e408e07SAnirudh Venkataramanan set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 17799d5c5a52SPaul Greenwalt if (netif_msg_tx_err(pf)) 17809d5c5a52SPaul Greenwalt dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", 17817c4bc1f5SAnirudh Venkataramanan i); 17827c4bc1f5SAnirudh Venkataramanan } 17837c4bc1f5SAnirudh Venkataramanan 17847c4bc1f5SAnirudh Venkataramanan reg = rd32(hw, VP_MDET_RX(i)); 17857c4bc1f5SAnirudh Venkataramanan if (reg & VP_MDET_RX_VALID_M) { 17867c4bc1f5SAnirudh Venkataramanan wr32(hw, VP_MDET_RX(i), 0xFFFF); 17879d5c5a52SPaul Greenwalt vf->mdd_rx_events.count++; 17887e408e07SAnirudh Venkataramanan set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 17899d5c5a52SPaul Greenwalt if (netif_msg_rx_err(pf)) 17909d5c5a52SPaul Greenwalt dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", 17917c4bc1f5SAnirudh Venkataramanan i); 17929d5c5a52SPaul Greenwalt 17939d5c5a52SPaul Greenwalt /* Since the queue is disabled on VF Rx MDD events, the 17949d5c5a52SPaul Greenwalt * PF can be configured to reset the VF through ethtool 17959d5c5a52SPaul 
Greenwalt * private flag mdd-auto-reset-vf. 17969d5c5a52SPaul Greenwalt */ 17977438a3b0SPaul Greenwalt if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { 17987438a3b0SPaul Greenwalt /* VF MDD event counters will be cleared by 17997438a3b0SPaul Greenwalt * reset, so print the event prior to reset. 18007438a3b0SPaul Greenwalt */ 18017438a3b0SPaul Greenwalt ice_print_vf_rx_mdd_event(vf); 18029d5c5a52SPaul Greenwalt ice_reset_vf(&pf->vf[i], false); 18039d5c5a52SPaul Greenwalt } 18047c4bc1f5SAnirudh Venkataramanan } 18057438a3b0SPaul Greenwalt } 18067c4bc1f5SAnirudh Venkataramanan 18079d5c5a52SPaul Greenwalt ice_print_vfs_mdd_events(pf); 1808b3969fd7SSudheer Mogilappagari } 1809b3969fd7SSudheer Mogilappagari 1810b3969fd7SSudheer Mogilappagari /** 18116d599946STony Nguyen * ice_force_phys_link_state - Force the physical link state 18126d599946STony Nguyen * @vsi: VSI to force the physical link state to up/down 18136d599946STony Nguyen * @link_up: true/false indicates to set the physical link to up/down 18146d599946STony Nguyen * 18156d599946STony Nguyen * Force the physical link state by getting the current PHY capabilities from 18166d599946STony Nguyen * hardware and setting the PHY config based on the determined capabilities. If 18176d599946STony Nguyen * link changes a link event will be triggered because both the Enable Automatic 18186d599946STony Nguyen * Link Update and LESM Enable bits are set when setting the PHY capabilities. 
/**
 * ice_force_phys_link_state - Force the physical link state
 * @vsi: VSI to force the physical link state to up/down
 * @link_up: true/false indicates to set the physical link to up/down
 *
 * Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities. If
 * link changes a link event will be triggered because both the Enable Automatic
 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
 *
 * Returns 0 on success, negative on failure
 */
static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_port_info *pi;
	struct device *dev;
	int retcode;

	if (!vsi || !vsi->port_info || !vsi->back)
		return -EINVAL;
	/* only the PF VSI owns the physical port; nothing to do otherwise */
	if (vsi->type != ICE_VSI_PF)
		return 0;

	dev = ice_pf_to_dev(vsi->back);

	pi = vsi->port_info;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* read the currently active PHY configuration from firmware */
	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
				      NULL);
	if (retcode) {
		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
		goto out;
	}

	/* No change in link */
	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
		goto out;

	/* Use the current user PHY configuration. The current user PHY
	 * configuration is initialized during probe from PHY capabilities
	 * software mode, and updated on set PHY configuration.
	 */
	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
	if (!cfg) {
		retcode = -ENOMEM;
		goto out;
	}

	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
	if (link_up)
		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
	else
		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;

	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
	if (retcode) {
		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
	}

	kfree(cfg);
out:
	kfree(pcaps);
	return retcode;
}
= kzalloc(sizeof(*pcaps), GFP_KERNEL); 18991a3571b5SPaul Greenwalt if (!pcaps) 19001a3571b5SPaul Greenwalt return -ENOMEM; 19011a3571b5SPaul Greenwalt 19022ccc1c1cSTony Nguyen err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, 19032ccc1c1cSTony Nguyen pcaps, NULL); 19041a3571b5SPaul Greenwalt 19052ccc1c1cSTony Nguyen if (err) { 19061a3571b5SPaul Greenwalt dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); 19071a3571b5SPaul Greenwalt goto out; 19081a3571b5SPaul Greenwalt } 19091a3571b5SPaul Greenwalt 19101a3571b5SPaul Greenwalt pf->nvm_phy_type_hi = pcaps->phy_type_high; 19111a3571b5SPaul Greenwalt pf->nvm_phy_type_lo = pcaps->phy_type_low; 19121a3571b5SPaul Greenwalt 19131a3571b5SPaul Greenwalt out: 19141a3571b5SPaul Greenwalt kfree(pcaps); 19151a3571b5SPaul Greenwalt return err; 19161a3571b5SPaul Greenwalt } 19171a3571b5SPaul Greenwalt 19181a3571b5SPaul Greenwalt /** 1919ea78ce4dSPaul Greenwalt * ice_init_link_dflt_override - Initialize link default override 1920ea78ce4dSPaul Greenwalt * @pi: port info structure 1921b4e813ddSBruce Allan * 1922b4e813ddSBruce Allan * Initialize link default override and PHY total port shutdown during probe 1923ea78ce4dSPaul Greenwalt */ 1924ea78ce4dSPaul Greenwalt static void ice_init_link_dflt_override(struct ice_port_info *pi) 1925ea78ce4dSPaul Greenwalt { 1926ea78ce4dSPaul Greenwalt struct ice_link_default_override_tlv *ldo; 1927ea78ce4dSPaul Greenwalt struct ice_pf *pf = pi->hw->back; 1928ea78ce4dSPaul Greenwalt 1929ea78ce4dSPaul Greenwalt ldo = &pf->link_dflt_override; 1930b4e813ddSBruce Allan if (ice_get_link_default_override(ldo, pi)) 1931b4e813ddSBruce Allan return; 1932b4e813ddSBruce Allan 1933b4e813ddSBruce Allan if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) 1934b4e813ddSBruce Allan return; 1935b4e813ddSBruce Allan 1936b4e813ddSBruce Allan /* Enable Total Port Shutdown (override/replace link-down-on-close 1937b4e813ddSBruce Allan * ethtool private flag) for ports with Port Disable bit set. 
1938b4e813ddSBruce Allan */ 1939b4e813ddSBruce Allan set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); 1940b4e813ddSBruce Allan set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); 1941ea78ce4dSPaul Greenwalt } 1942ea78ce4dSPaul Greenwalt 1943ea78ce4dSPaul Greenwalt /** 1944ea78ce4dSPaul Greenwalt * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings 1945ea78ce4dSPaul Greenwalt * @pi: port info structure 1946ea78ce4dSPaul Greenwalt * 19470a02944fSAnirudh Venkataramanan * If default override is enabled, initialize the user PHY cfg speed and FEC 1948ea78ce4dSPaul Greenwalt * settings using the default override mask from the NVM. 1949ea78ce4dSPaul Greenwalt * 1950ea78ce4dSPaul Greenwalt * The PHY should only be configured with the default override settings the 19517e408e07SAnirudh Venkataramanan * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state 1952ea78ce4dSPaul Greenwalt * is used to indicate that the user PHY cfg default override is initialized 1953ea78ce4dSPaul Greenwalt * and the PHY has not been configured with the default override settings. The 1954ea78ce4dSPaul Greenwalt * state is set here, and cleared in ice_configure_phy the first time the PHY is 1955ea78ce4dSPaul Greenwalt * configured. 19560a02944fSAnirudh Venkataramanan * 19570a02944fSAnirudh Venkataramanan * This function should be called only if the FW doesn't support default 19580a02944fSAnirudh Venkataramanan * configuration mode, as reported by ice_fw_supports_report_dflt_cfg. 
/**
 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
 * @pi: port info structure
 *
 * If default override is enabled, initialize the user PHY cfg speed and FEC
 * settings using the default override mask from the NVM.
 *
 * The PHY should only be configured with the default override settings the
 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
 * is used to indicate that the user PHY cfg default override is initialized
 * and the PHY has not been configured with the default override settings. The
 * state is set here, and cleared in ice_configure_phy the first time the PHY is
 * configured.
 *
 * This function should be called only if the FW doesn't support default
 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
 */
static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;

	/* If link default override is enabled, use to mask NVM PHY capabilities
	 * for speed and FEC default configuration.
	 */
	cfg = &phy->curr_user_phy_cfg;

	/* only mask PHY types when the override actually specifies any */
	if (ldo->phy_type_low || ldo->phy_type_high) {
		cfg->phy_type_low = pf->nvm_phy_type_lo &
				    cpu_to_le64(ldo->phy_type_low);
		cfg->phy_type_high = pf->nvm_phy_type_hi &
				     cpu_to_le64(ldo->phy_type_high);
	}
	cfg->link_fec_opt = ldo->fec_options;
	/* FEC request defaults to auto; the override supplies the option mask */
	phy->curr_user_fec_req = ICE_FEC_AUTO;

	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
}
/**
 * ice_init_phy_user_cfg - Initialize the PHY user configuration
 * @pi: port info structure
 *
 * Initialize the current user PHY configuration, speed, FEC, and FC requested
 * mode to default. The PHY defaults are from get PHY capabilities topology
 * with media so call when media is first available. An error is returned if
 * called when media is not available. The PHY initialization completed state is
 * set here.
 *
 * These configurations are used when setting PHY
 * configuration. The user PHY configuration is updated on set PHY
 * configuration. Returns 0 on success, negative on failure
 */
static int ice_init_phy_user_cfg(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;
	int err;

	/* caller must only invoke this once media is present */
	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
		return -EIO;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* prefer the FW-reported default configuration when supported */
	if (ice_fw_supports_report_dflt_cfg(pi->hw))
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
					  pcaps, NULL);
	else
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					  pcaps, NULL);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
		goto err_out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);

	/* check if lenient mode is supported and enabled */
	if (ice_fw_supports_link_override(pi->hw) &&
	    !(pcaps->module_compliance_enforcement &
	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);

		/* if the FW supports default PHY configuration mode, then the driver
		 * does not have to apply link override settings. If not,
		 * initialize user PHY configuration with link override values
		 */
		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
			ice_init_phy_cfg_dflt_override(pi);
			/* skip FEC/FC derivation below; override supplied them */
			goto out;
		}
	}

	/* if link default override is not enabled, set user flow control and
	 * FEC settings based on what get_phy_caps returned
	 */
	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
						      pcaps->link_fec_options);
	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);

out:
	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
err_out:
	kfree(pcaps);
	return err;
}
/**
 * ice_configure_phy - configure PHY
 * @vsi: VSI of PHY
 *
 * Set the PHY configuration. If the current PHY configuration is the same as
 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
 * configure the based get PHY capabilities for topology with media.
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_configure_phy(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_port_info *pi = vsi->port_info;
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = vsi->back;
	int err;

	/* Ensure we have media as we cannot configure a medialess port */
	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
		return -EPERM;

	ice_print_topo_conflict(vsi);

	/* in strict (non-lenient) mode an unsupported module blocks config */
	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
		return -EPERM;

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
		return ice_force_phys_link_state(vsi, true);

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get current PHY config */
	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
				  NULL);
	if (err) {
		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
			vsi->vsi_num, err);
		goto done;
	}

	/* If PHY enable link is configured and configuration has not changed,
	 * there's nothing to do
	 */
	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
		goto done;

	/* Use PHY topology as baseline for configuration */
	memset(pcaps, 0, sizeof(*pcaps));
	if (ice_fw_supports_report_dflt_cfg(pi->hw))
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
					  pcaps, NULL);
	else
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					  pcaps, NULL);
	if (err) {
		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
			vsi->vsi_num, err);
		goto done;
	}

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg) {
		err = -ENOMEM;
		goto done;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);

	/* Speed - If default override pending, use curr_user_phy_cfg set in
	 * ice_init_phy_user_cfg_ldo.
	 */
	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
			       vsi->back->state)) {
		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
	} else {
		u64 phy_low = 0, phy_high = 0;

		/* translate the user's requested speed into PHY type masks */
		ice_update_phy_type(&phy_low, &phy_high,
				    pi->phy.curr_user_speed_req);
		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
		cfg->phy_type_high = pcaps->phy_type_high &
				     cpu_to_le64(phy_high);
	}

	/* Can't provide what was requested; use PHY capabilities */
	if (!cfg->phy_type_low && !cfg->phy_type_high) {
		cfg->phy_type_low = pcaps->phy_type_low;
		cfg->phy_type_high = pcaps->phy_type_high;
	}

	/* FEC */
	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);

	/* Can't provide what was requested; use PHY capabilities */
	if (cfg->link_fec_opt !=
	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
		cfg->link_fec_opt = pcaps->link_fec_options;
	}

	/* Flow Control - always supported; no need to check against
	 * capabilities
	 */
	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);

	/* Enable link and link update */
	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;

	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
	if (err)
		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
			vsi->vsi_num, err);

	kfree(cfg);
done:
	kfree(pcaps);
	return err;
}
/**
 * ice_check_media_subtask - Check for media
 * @pf: pointer to PF struct
 *
 * If media is available, then initialize PHY user configuration if it is not
 * been, and configure the PHY if the interface is up.
 */
static void ice_check_media_subtask(struct ice_pf *pf)
{
	struct ice_port_info *pi;
	struct ice_vsi *vsi;
	int err;

	/* No need to check for media if it's already present */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
		return;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Refresh link info and check if media is present */
	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err)
		return;

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		/* first media insertion: capture the user PHY defaults */
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
			ice_init_phy_user_cfg(pi);

		/* PHY settings are reset on media insertion, reconfigure
		 * PHY to preserve settings.
		 */
		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
			return;

		/* only clear NO_MEDIA once the PHY was configured cleanly */
		err = ice_configure_phy(vsi);
		if (!err)
			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);

		/* A Link Status Event will be generated; the event handler
		 * will complete bringing the interface up
		 */
	}
}
/**
 * ice_service_task - manage and run subtasks
 * @work: pointer to work_struct contained by the PF struct
 *
 * Runs the periodic driver housekeeping: reset handling first, then the
 * individual subtasks, and finally reschedules itself if work remains.
 */
static void ice_service_task(struct work_struct *work)
{
	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
	unsigned long start_time = jiffies;

	/* subtasks */

	/* process reset requests first */
	ice_reset_subtask(pf);

	/* bail if a reset/recovery cycle is pending or rebuild failed */
	if (ice_is_reset_in_progress(pf->state) ||
	    test_bit(ICE_SUSPENDED, pf->state) ||
	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
		ice_service_task_complete(pf);
		return;
	}

	/* re-plug the auxiliary (RDMA) device if a plug was requested */
	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
		ice_plug_aux_dev(pf);

	ice_clean_adminq_subtask(pf);
	ice_check_media_subtask(pf);
	ice_check_for_hang_subtask(pf);
	ice_sync_fltr_subtask(pf);
	ice_handle_mdd_event(pf);
	ice_watchdog_subtask(pf);

	/* in safe mode the remaining subtasks do not apply */
	if (ice_is_safe_mode(pf)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_process_vflr_event(pf);
	ice_clean_mailboxq_subtask(pf);
	ice_clean_sbq_subtask(pf);
	ice_sync_arfs_fltrs(pf);
	ice_flush_fdir_ctx(pf);

	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
	ice_service_task_complete(pf);

	/* If the tasks have taken longer than one service timer period
	 * or there is more work to be done, reset the service timer to
	 * schedule the service task now.
	 */
	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		mod_timer(&pf->serv_tmr, jiffies);
}
	 */
	/* NOTE(review): this is the tail of the service-task function whose
	 * start is above this chunk — it re-arms the service timer right away
	 * when the task overran its period or any event bit is still pending.
	 */
	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		mod_timer(&pf->serv_tmr, jiffies);
}

/**
 * ice_set_ctrlq_len - helper function to set controlq length
 * @hw: pointer to the HW instance
 *
 * Sizes the admin, mailbox and sideband control queues (number of send and
 * receive descriptors and the buffer size of each) before they are
 * initialized.
 */
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
	hw->adminq.num_rq_entries = ICE_AQ_LEN;
	hw->adminq.num_sq_entries = ICE_AQ_LEN;
	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
	/* mailbox RX queue is sized with the ARQLEN register field mask,
	 * i.e. presumably the maximum count the hardware field can hold —
	 * NOTE(review): confirm against the PF_MBX_ARQLEN register spec
	 */
	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
}

/**
 * ice_schedule_reset - schedule a reset
 * @pf: board private structure
 * @reset: reset being requested
 *
 * Records the requested reset type in pf->state and kicks the service task,
 * which performs the actual reset asynchronously.
 *
 * Return: 0 on success, -EIO if an earlier reset failed, -EBUSY if a
 * reset/recovery is already in progress, -EINVAL for an unknown reset type.
 */
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
{
	struct device *dev = ice_pf_to_dev(pf);

	/* bail out if earlier reset has failed */
	if (test_bit(ICE_RESET_FAILED, pf->state)) {
		dev_dbg(dev, "earlier reset has failed\n");
		return -EIO;
	}
	/* bail if reset/recovery already in progress */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_dbg(dev, "Reset already in progress\n");
		return -EBUSY;
	}

	/* detach the auxiliary (RDMA) device before the reset is requested */
	ice_unplug_aux_dev(pf);

	switch (reset) {
	case ICE_RESET_PFR:
		set_bit(ICE_PFR_REQ, pf->state);
		break;
	case ICE_RESET_CORER:
		set_bit(ICE_CORER_REQ, pf->state);
		break;
	case ICE_RESET_GLOBR:
		set_bit(ICE_GLOBR_REQ, pf->state);
		break;
	default:
		return -EINVAL;
	}

	ice_service_task_schedule(pf);
	return 0;
}

/**
 * ice_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 */
static void
ice_irq_affinity_notify(struct irq_affinity_notify *notify,
			const cpumask_t *mask)
{
	struct ice_q_vector *q_vector =
		container_of(notify, struct ice_q_vector, affinity_notify);

	/* cache the new CPU mask on the q_vector for later use */
	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * ice_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
2379cdedef59SAnirudh Venkataramanan */ 2380cdedef59SAnirudh Venkataramanan static void ice_irq_affinity_release(struct kref __always_unused *ref) {} 2381cdedef59SAnirudh Venkataramanan 2382cdedef59SAnirudh Venkataramanan /** 2383cdedef59SAnirudh Venkataramanan * ice_vsi_ena_irq - Enable IRQ for the given VSI 2384cdedef59SAnirudh Venkataramanan * @vsi: the VSI being configured 2385cdedef59SAnirudh Venkataramanan */ 2386cdedef59SAnirudh Venkataramanan static int ice_vsi_ena_irq(struct ice_vsi *vsi) 2387cdedef59SAnirudh Venkataramanan { 2388ba880734SBrett Creeley struct ice_hw *hw = &vsi->back->hw; 2389cdedef59SAnirudh Venkataramanan int i; 2390cdedef59SAnirudh Venkataramanan 23910c2561c8SBrett Creeley ice_for_each_q_vector(vsi, i) 2392cdedef59SAnirudh Venkataramanan ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); 2393cdedef59SAnirudh Venkataramanan 2394cdedef59SAnirudh Venkataramanan ice_flush(hw); 2395cdedef59SAnirudh Venkataramanan return 0; 2396cdedef59SAnirudh Venkataramanan } 2397cdedef59SAnirudh Venkataramanan 2398cdedef59SAnirudh Venkataramanan /** 2399cdedef59SAnirudh Venkataramanan * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI 2400cdedef59SAnirudh Venkataramanan * @vsi: the VSI being configured 2401cdedef59SAnirudh Venkataramanan * @basename: name for the vector 2402cdedef59SAnirudh Venkataramanan */ 2403cdedef59SAnirudh Venkataramanan static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) 2404cdedef59SAnirudh Venkataramanan { 2405cdedef59SAnirudh Venkataramanan int q_vectors = vsi->num_q_vectors; 2406cdedef59SAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 2407cbe66bfeSBrett Creeley int base = vsi->base_vector; 24084015d11eSBrett Creeley struct device *dev; 2409cdedef59SAnirudh Venkataramanan int rx_int_idx = 0; 2410cdedef59SAnirudh Venkataramanan int tx_int_idx = 0; 2411cdedef59SAnirudh Venkataramanan int vector, err; 2412cdedef59SAnirudh Venkataramanan int irq_num; 2413cdedef59SAnirudh Venkataramanan 
24144015d11eSBrett Creeley dev = ice_pf_to_dev(pf); 2415cdedef59SAnirudh Venkataramanan for (vector = 0; vector < q_vectors; vector++) { 2416cdedef59SAnirudh Venkataramanan struct ice_q_vector *q_vector = vsi->q_vectors[vector]; 2417cdedef59SAnirudh Venkataramanan 2418cdedef59SAnirudh Venkataramanan irq_num = pf->msix_entries[base + vector].vector; 2419cdedef59SAnirudh Venkataramanan 2420e72bba21SMaciej Fijalkowski if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) { 2421cdedef59SAnirudh Venkataramanan snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2422cdedef59SAnirudh Venkataramanan "%s-%s-%d", basename, "TxRx", rx_int_idx++); 2423cdedef59SAnirudh Venkataramanan tx_int_idx++; 2424e72bba21SMaciej Fijalkowski } else if (q_vector->rx.rx_ring) { 2425cdedef59SAnirudh Venkataramanan snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2426cdedef59SAnirudh Venkataramanan "%s-%s-%d", basename, "rx", rx_int_idx++); 2427e72bba21SMaciej Fijalkowski } else if (q_vector->tx.tx_ring) { 2428cdedef59SAnirudh Venkataramanan snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2429cdedef59SAnirudh Venkataramanan "%s-%s-%d", basename, "tx", tx_int_idx++); 2430cdedef59SAnirudh Venkataramanan } else { 2431cdedef59SAnirudh Venkataramanan /* skip this unused q_vector */ 2432cdedef59SAnirudh Venkataramanan continue; 2433cdedef59SAnirudh Venkataramanan } 2434da62c5ffSQi Zhang if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) 2435da62c5ffSQi Zhang err = devm_request_irq(dev, irq_num, vsi->irq_handler, 2436da62c5ffSQi Zhang IRQF_SHARED, q_vector->name, 2437da62c5ffSQi Zhang q_vector); 2438da62c5ffSQi Zhang else 2439da62c5ffSQi Zhang err = devm_request_irq(dev, irq_num, vsi->irq_handler, 2440da62c5ffSQi Zhang 0, q_vector->name, q_vector); 2441cdedef59SAnirudh Venkataramanan if (err) { 244219cce2c6SAnirudh Venkataramanan netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", 244319cce2c6SAnirudh Venkataramanan err); 2444cdedef59SAnirudh Venkataramanan goto 
free_q_irqs; 2445cdedef59SAnirudh Venkataramanan } 2446cdedef59SAnirudh Venkataramanan 2447cdedef59SAnirudh Venkataramanan /* register for affinity change notifications */ 244828bf2672SBrett Creeley if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { 244928bf2672SBrett Creeley struct irq_affinity_notify *affinity_notify; 245028bf2672SBrett Creeley 245128bf2672SBrett Creeley affinity_notify = &q_vector->affinity_notify; 245228bf2672SBrett Creeley affinity_notify->notify = ice_irq_affinity_notify; 245328bf2672SBrett Creeley affinity_notify->release = ice_irq_affinity_release; 245428bf2672SBrett Creeley irq_set_affinity_notifier(irq_num, affinity_notify); 245528bf2672SBrett Creeley } 2456cdedef59SAnirudh Venkataramanan 2457cdedef59SAnirudh Venkataramanan /* assign the mask for this irq */ 2458cdedef59SAnirudh Venkataramanan irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); 2459cdedef59SAnirudh Venkataramanan } 2460cdedef59SAnirudh Venkataramanan 2461cdedef59SAnirudh Venkataramanan vsi->irqs_ready = true; 2462cdedef59SAnirudh Venkataramanan return 0; 2463cdedef59SAnirudh Venkataramanan 2464cdedef59SAnirudh Venkataramanan free_q_irqs: 2465cdedef59SAnirudh Venkataramanan while (vector) { 2466cdedef59SAnirudh Venkataramanan vector--; 246728bf2672SBrett Creeley irq_num = pf->msix_entries[base + vector].vector; 246828bf2672SBrett Creeley if (!IS_ENABLED(CONFIG_RFS_ACCEL)) 2469cdedef59SAnirudh Venkataramanan irq_set_affinity_notifier(irq_num, NULL); 2470cdedef59SAnirudh Venkataramanan irq_set_affinity_hint(irq_num, NULL); 24714015d11eSBrett Creeley devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); 2472cdedef59SAnirudh Venkataramanan } 2473cdedef59SAnirudh Venkataramanan return err; 2474cdedef59SAnirudh Venkataramanan } 2475cdedef59SAnirudh Venkataramanan 2476cdedef59SAnirudh Venkataramanan /** 2477efc2214bSMaciej Fijalkowski * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP 2478efc2214bSMaciej Fijalkowski * @vsi: VSI to setup Tx rings used by XDP 
 *
 * Return 0 on success and negative value on error
 */
static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_tx_desc *tx_desc;
	int i, j;

	/* one XDP Tx ring per XDP queue, placed after the regular Tx queues
	 * in the VSI's txq_map (offset by alloc_txq)
	 */
	ice_for_each_xdp_txq(vsi, i) {
		u16 xdp_q_idx = vsi->alloc_txq + i;
		struct ice_tx_ring *xdp_ring;

		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);

		if (!xdp_ring)
			goto free_xdp_rings;

		xdp_ring->q_index = xdp_q_idx;
		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
		xdp_ring->vsi = vsi;
		xdp_ring->netdev = NULL;
		xdp_ring->next_dd = ICE_TX_THRESH - 1;
		xdp_ring->next_rs = ICE_TX_THRESH - 1;
		xdp_ring->dev = dev;
		xdp_ring->count = vsi->num_tx_desc;
		/* publish before setup so readers see a fully-zeroed ring */
		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
		if (ice_setup_tx_ring(xdp_ring))
			goto free_xdp_rings;
		ice_set_ring_xdp(xdp_ring);
		xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
		spin_lock_init(&xdp_ring->tx_lock);
		/* pre-mark every descriptor as done so the clean routine
		 * does not treat stale zeroed descriptors as in-flight
		 */
		for (j = 0; j < xdp_ring->count; j++) {
			tx_desc = ICE_TX_DESC(xdp_ring, j);
			tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
		}
	}

	/* when there are fewer XDP rings than Rx queues (locking key on),
	 * share rings round-robin across Rx queues
	 */
	ice_for_each_rxq(vsi, i) {
		if (static_key_enabled(&ice_xdp_locking_key))
			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
		else
			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
	}

	return 0;

free_xdp_rings:
	/* free descriptor memory of rings set up so far; the ring structs
	 * themselves are kfree_rcu'd by ice_prepare_xdp_rings' error path
	 */
	for (; i >= 0; i--)
		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
			ice_free_tx_ring(vsi->xdp_rings[i]);
	return -ENOMEM;
}

/**
 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
 * @vsi: VSI to set the bpf prog on
 * @prog: the bpf prog pointer
 *
 * Swaps the program atomically, drops the reference on the program it
 * replaced, and mirrors the new pointer onto every Rx ring.
 */
static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog;
	int i;

	old_prog = xchg(&vsi->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	ice_for_each_rxq(vsi, i)
		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog,
			   vsi->xdp_prog);
}

/**
 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
 * @vsi: VSI to bring up Tx rings used by XDP
 * @prog: bpf program that will be assigned to VSI
 *
 * Return 0 on success and negative value on error
 */
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	int xdp_rings_rem = vsi->num_xdp_txq;
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg xdp_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->num_xdp_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = vsi->alloc_txq,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	struct device *dev;
	int i, v_idx;
	int status;

	dev = ice_pf_to_dev(pf);
	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
	if (!vsi->xdp_rings)
		return -ENOMEM;

	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
	/* reserve PF Tx queue slots for the XDP rings */
	if (__ice_vsi_get_qs(&xdp_qs_cfg))
		goto err_map_xdp;

	if (static_key_enabled(&ice_xdp_locking_key))
		netdev_warn(vsi->netdev,
			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");

	if (ice_xdp_alloc_setup_rings(vsi))
		goto clear_xdp_rings;

	/* follow the logic from ice_vsi_map_rings_to_vectors */
	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
		int xdp_rings_per_v, q_id, q_base;

		/* spread the remaining XDP rings evenly over the remaining
		 * q_vectors
		 */
		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
					       vsi->num_q_vectors - v_idx);
		q_base = vsi->num_xdp_txq - xdp_rings_rem;

		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];

			/* prepend the ring onto the vector's Tx ring list */
			xdp_ring->q_vector = q_vector;
			xdp_ring->next = q_vector->tx.tx_ring;
			q_vector->tx.tx_ring = xdp_ring;
		}
		xdp_rings_rem -= xdp_rings_per_v;
	}

	/* omit the scheduler update if in reset path; XDP queues will be
	 * taken into account at the end of ice_vsi_rebuild, where
	 * ice_cfg_vsi_lan is being called
	 */
	if (ice_is_reset_in_progress(pf->state))
		return 0;

	/* tell the Tx scheduler that right now we have
	 * additional queues
	 */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
			status);
		goto clear_xdp_rings;
	}

	/* assign the prog only when it's not already present on VSI;
	 * this flow is a subject of both ethtool -L and ndo_bpf flows;
	 * VSI rebuild that happens under ethtool -L can expose us to
	 * the bpf_prog refcount issues as we would be swapping same
	 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
	 * on it as it would be treated as an 'old_prog'; for ndo_bpf
	 * this is not harmful as dev_xdp_install bumps the refcount
	 * before calling the op exposed by the driver;
	 */
	if (!ice_is_xdp_ena_vsi(vsi))
		ice_vsi_assign_bpf_prog(vsi, prog);

	return 0;
clear_xdp_rings:
	/* free ring structs; falls through into err_map_xdp to also
	 * release the reserved queue slots
	 */
	ice_for_each_xdp_txq(vsi, i)
		if (vsi->xdp_rings[i]) {
			kfree_rcu(vsi->xdp_rings[i], rcu);
			vsi->xdp_rings[i] = NULL;
		}

err_map_xdp:
	mutex_lock(&pf->avail_q_mutex);
	ice_for_each_xdp_txq(vsi, i) {
		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
	}
	mutex_unlock(&pf->avail_q_mutex);

	devm_kfree(dev, vsi->xdp_rings);
	return -ENOMEM;
}

/**
 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
 * @vsi: VSI to remove XDP rings
 *
 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
 * resources
 */
int ice_destroy_xdp_rings(struct ice_vsi *vsi)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_pf *pf = vsi->back;
	int i, v_idx;

	/* q_vectors are freed in reset path so there's no point in detaching
	 * rings; in case of rebuild being triggered not from reset bits
	 * in pf->state won't be set, so additionally check first q_vector
	 * against NULL
	 */
	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
		goto free_qmap;

	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
		struct ice_tx_ring *ring;

		/* walk past the XDP rings that were prepended to the
		 * vector's Tx ring list; 'ring' ends up at the first
		 * non-XDP ring (or NULL)
		 */
		ice_for_each_tx_ring(ring, q_vector->tx)
			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
				break;

		/* restore the value of last node prior to XDP setup */
		q_vector->tx.tx_ring = ring;
	}

free_qmap:
	/* return the reserved Tx queue slots to the PF pool */
	mutex_lock(&pf->avail_q_mutex);
	ice_for_each_xdp_txq(vsi, i) {
		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
	}
	mutex_unlock(&pf->avail_q_mutex);

	ice_for_each_xdp_txq(vsi, i)
		if (vsi->xdp_rings[i]) {
			if (vsi->xdp_rings[i]->desc)
				ice_free_tx_ring(vsi->xdp_rings[i]);
			kfree_rcu(vsi->xdp_rings[i], rcu);
			vsi->xdp_rings[i] = NULL;
		}

	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
	vsi->xdp_rings = NULL;

	if (static_key_enabled(&ice_xdp_locking_key))
		static_branch_dec(&ice_xdp_locking_key);

	/* in the reset/rebuild case the prog swap and scheduler update are
	 * handled elsewhere (see the comment above free_qmap), so stop here
	 */
	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
		return 0;

	ice_vsi_assign_bpf_prog(vsi, NULL);

	/* notify Tx scheduler that we destroyed XDP queues and bring
	 * back the old number of child nodes
	 */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->num_txq;

	/* change number of XDP Tx queues to 0 */
	vsi->num_xdp_txq = 0;

	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
			       max_txqs);
}

/**
 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
 * @vsi: VSI to schedule napi on
 *
 * Kicks NAPI on every Rx queue that has an AF_XDP pool attached so pending
 * zero-copy work is picked up immediately.
 */
static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
{
	int i;

	ice_for_each_rxq(vsi, i) {
		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];

		if (rx_ring->xsk_pool)
			napi_schedule(&rx_ring->q_vector->napi);
	}
}

/**
 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
 * @vsi: VSI to determine the count of XDP Tx qs
 *
 * returns 0 if Tx qs count is higher than at least half of CPU count,
 * -ENOMEM otherwise
 */
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
{
	u16 avail = ice_get_avail_txq_count(vsi->back);
	u16 cpus = num_possible_cpus();

	if (avail < cpus / 2)
		return -ENOMEM;

	vsi->num_xdp_txq = min_t(u16, avail, cpus);

	/* fewer rings than CPUs means rings are shared — enable the
	 * tx_lock path via the static key
	 */
	if (vsi->num_xdp_txq < cpus)
		static_branch_inc(&ice_xdp_locking_key);

	return 0;
}

/**
 * ice_xdp_setup_prog - Add or remove XDP eBPF program
 * @vsi: VSI to setup XDP for
 * @prog: XDP program
 * @extack: netlink extended ack
 *
 * Return: 0 on success; the error from ice_down() if stopping the netdev
 * fails; -EOPNOTSUPP when the MTU is too large; otherwise any ring
 * setup/teardown or ice_up() failure is collapsed into -ENOMEM (see the
 * final return expression).
 */
static int
ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
		   struct netlink_ext_ack *extack)
{
	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
	bool if_running = netif_running(vsi->netdev);
	int ret = 0, xdp_ring_err = 0;

	if (frame_size > vsi->rx_buf_len) {
		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
		return -EOPNOTSUPP;
	}

	/* need to stop netdev while setting up the program for Rx rings */
	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
		ret = ice_down(vsi);
		if (ret) {
			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
			return ret;
		}
	}

	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
		/* attach: size the XDP Tx rings, then build them */
		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
		if (xdp_ring_err) {
			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
		} else {
			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
			if (xdp_ring_err)
				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
		}
	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
		/* detach: tear the XDP Tx rings back down */
		xdp_ring_err = ice_destroy_xdp_rings(vsi);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
	} else {
		/* safe to call even when prog == vsi->xdp_prog as
		 * dev_xdp_install in net/core/dev.c incremented prog's
		 * refcount so corresponding bpf_prog_put won't cause
		 * underflow
		 */
		ice_vsi_assign_bpf_prog(vsi, prog);
	}

	if (if_running)
		ret = ice_up(vsi);

	/* kick NAPI on AF_XDP queues so buffered work is processed */
	if (!ret && prog)
		ice_vsi_rx_napi_schedule(vsi);

	return (ret || xdp_ring_err) ? -ENOMEM : 0;
}

/**
 * ice_xdp_safe_mode - XDP handler for safe mode
 * @dev: netdevice
 * @xdp: XDP command
 *
 * Safe mode (no DDP package) cannot support XDP; always rejects the request
 * with an explanatory extack message.
 */
static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
			     struct netdev_bpf *xdp)
{
	NL_SET_ERR_MSG_MOD(xdp->extack,
			   "Please provide working DDP firmware package in order to use XDP\n"
			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
	return -EOPNOTSUPP;
}

/**
 * ice_xdp - implements XDP handler
 * @dev: netdevice
 * @xdp: XDP command
 */
static int
ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;

	/* XDP is only supported on the PF's main VSI */
	if (vsi->type != ICE_VSI_PF) {
		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
		return -EINVAL;
	}

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
	case XDP_SETUP_XSK_POOL:
		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
					  xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}

/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 *
 * Clears then programs the OICR enable register with the set of
 * miscellaneous (non-queue) interrupt causes, and re-enables the
 * "other interrupt cause" MSI-X vector.
 */
static void ice_ena_misc_vector(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* Disable anti-spoof detection interrupt to prevent spurious event
	 * interrupts during a function reset. Anti-spoof functionally is
	 * still supported.
	 */
	val = rd32(hw, GL_MDCK_TX_TDPU);
	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
	wr32(hw, GL_MDCK_TX_TDPU, val);

	/* clear things first */
	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
	rd32(hw, PFINT_OICR);		/* read to clear */

	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_VFLR_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_PUSH_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}

/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	struct device *dev;
	u32 oicr, ena_mask;

	dev = ice_pf_to_dev(pf);
	/* flag the control queues so the service task processes them */
	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_SWINT_M) {
		ena_mask &= ~PFINT_OICR_SWINT_M;
		pf->sw_int_count++;
	}

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
	}
	if (oicr & PFINT_OICR_VFLR_M) {
		/* disable any further VFLR event notifications */
		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
			u32 reg = rd32(hw, PFINT_OICR_ENA);

			reg &= ~PFINT_OICR_VFLR_M;
			wr32(hw, PFINT_OICR_ENA, reg);
		} else {
			ena_mask &= ~PFINT_OICR_VFLR_M;
			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
		}
	}

	if
(oicr & PFINT_OICR_GRST_M) { 29580b28b702SAnirudh Venkataramanan u32 reset; 2959b3969fd7SSudheer Mogilappagari 29600b28b702SAnirudh Venkataramanan /* we have a reset warning */ 29610b28b702SAnirudh Venkataramanan ena_mask &= ~PFINT_OICR_GRST_M; 29620b28b702SAnirudh Venkataramanan reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> 29630b28b702SAnirudh Venkataramanan GLGEN_RSTAT_RESET_TYPE_S; 29640b28b702SAnirudh Venkataramanan 29650b28b702SAnirudh Venkataramanan if (reset == ICE_RESET_CORER) 29660b28b702SAnirudh Venkataramanan pf->corer_count++; 29670b28b702SAnirudh Venkataramanan else if (reset == ICE_RESET_GLOBR) 29680b28b702SAnirudh Venkataramanan pf->globr_count++; 2969ca4929b6SBrett Creeley else if (reset == ICE_RESET_EMPR) 29700b28b702SAnirudh Venkataramanan pf->empr_count++; 2971ca4929b6SBrett Creeley else 29724015d11eSBrett Creeley dev_dbg(dev, "Invalid reset type %d\n", reset); 29730b28b702SAnirudh Venkataramanan 29740b28b702SAnirudh Venkataramanan /* If a reset cycle isn't already in progress, we set a bit in 29750b28b702SAnirudh Venkataramanan * pf->state so that the service task can start a reset/rebuild. 29760b28b702SAnirudh Venkataramanan */ 29777e408e07SAnirudh Venkataramanan if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { 29780b28b702SAnirudh Venkataramanan if (reset == ICE_RESET_CORER) 29797e408e07SAnirudh Venkataramanan set_bit(ICE_CORER_RECV, pf->state); 29800b28b702SAnirudh Venkataramanan else if (reset == ICE_RESET_GLOBR) 29817e408e07SAnirudh Venkataramanan set_bit(ICE_GLOBR_RECV, pf->state); 29820b28b702SAnirudh Venkataramanan else 29837e408e07SAnirudh Venkataramanan set_bit(ICE_EMPR_RECV, pf->state); 29840b28b702SAnirudh Venkataramanan 2985fd2a9817SAnirudh Venkataramanan /* There are couple of different bits at play here. 2986fd2a9817SAnirudh Venkataramanan * hw->reset_ongoing indicates whether the hardware is 2987fd2a9817SAnirudh Venkataramanan * in reset. 
This is set to true when a reset interrupt 2988fd2a9817SAnirudh Venkataramanan * is received and set back to false after the driver 2989fd2a9817SAnirudh Venkataramanan * has determined that the hardware is out of reset. 2990fd2a9817SAnirudh Venkataramanan * 29917e408e07SAnirudh Venkataramanan * ICE_RESET_OICR_RECV in pf->state indicates 2992fd2a9817SAnirudh Venkataramanan * that a post reset rebuild is required before the 2993fd2a9817SAnirudh Venkataramanan * driver is operational again. This is set above. 2994fd2a9817SAnirudh Venkataramanan * 2995fd2a9817SAnirudh Venkataramanan * As this is the start of the reset/rebuild cycle, set 2996fd2a9817SAnirudh Venkataramanan * both to indicate that. 2997fd2a9817SAnirudh Venkataramanan */ 2998fd2a9817SAnirudh Venkataramanan hw->reset_ongoing = true; 29990b28b702SAnirudh Venkataramanan } 30000b28b702SAnirudh Venkataramanan } 30010b28b702SAnirudh Venkataramanan 3002ea9b847cSJacob Keller if (oicr & PFINT_OICR_TSYN_TX_M) { 3003ea9b847cSJacob Keller ena_mask &= ~PFINT_OICR_TSYN_TX_M; 3004ea9b847cSJacob Keller ice_ptp_process_ts(pf); 3005ea9b847cSJacob Keller } 3006ea9b847cSJacob Keller 3007172db5f9SMaciej Machnikowski if (oicr & PFINT_OICR_TSYN_EVNT_M) { 3008172db5f9SMaciej Machnikowski u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 3009172db5f9SMaciej Machnikowski u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx)); 3010172db5f9SMaciej Machnikowski 3011172db5f9SMaciej Machnikowski /* Save EVENTs from GTSYN register */ 3012172db5f9SMaciej Machnikowski pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M | 3013172db5f9SMaciej Machnikowski GLTSYN_STAT_EVENT1_M | 3014172db5f9SMaciej Machnikowski GLTSYN_STAT_EVENT2_M); 3015172db5f9SMaciej Machnikowski ena_mask &= ~PFINT_OICR_TSYN_EVNT_M; 3016172db5f9SMaciej Machnikowski kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work); 3017172db5f9SMaciej Machnikowski } 3018172db5f9SMaciej Machnikowski 3019348048e7SDave Ertman #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | 
PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) 3020348048e7SDave Ertman if (oicr & ICE_AUX_CRIT_ERR) { 3021348048e7SDave Ertman struct iidc_event *event; 3022348048e7SDave Ertman 3023348048e7SDave Ertman ena_mask &= ~ICE_AUX_CRIT_ERR; 3024348048e7SDave Ertman event = kzalloc(sizeof(*event), GFP_KERNEL); 3025348048e7SDave Ertman if (event) { 3026348048e7SDave Ertman set_bit(IIDC_EVENT_CRIT_ERR, event->type); 3027348048e7SDave Ertman /* report the entire OICR value to AUX driver */ 3028348048e7SDave Ertman event->reg = oicr; 3029348048e7SDave Ertman ice_send_event_to_aux(pf, event); 3030348048e7SDave Ertman kfree(event); 3031348048e7SDave Ertman } 3032940b61afSAnirudh Venkataramanan } 3033940b61afSAnirudh Venkataramanan 30348d7189d2SMd Fahad Iqbal Polash /* Report any remaining unexpected interrupts */ 3035940b61afSAnirudh Venkataramanan oicr &= ena_mask; 3036940b61afSAnirudh Venkataramanan if (oicr) { 30374015d11eSBrett Creeley dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr); 3038940b61afSAnirudh Venkataramanan /* If a critical error is pending there is no choice but to 3039940b61afSAnirudh Venkataramanan * reset the device. 
3040940b61afSAnirudh Venkataramanan */ 3041348048e7SDave Ertman if (oicr & (PFINT_OICR_PCI_EXCEPTION_M | 30420b28b702SAnirudh Venkataramanan PFINT_OICR_ECC_ERR_M)) { 30437e408e07SAnirudh Venkataramanan set_bit(ICE_PFR_REQ, pf->state); 30440b28b702SAnirudh Venkataramanan ice_service_task_schedule(pf); 30450b28b702SAnirudh Venkataramanan } 3046940b61afSAnirudh Venkataramanan } 3047940b61afSAnirudh Venkataramanan ret = IRQ_HANDLED; 3048940b61afSAnirudh Venkataramanan 3049940b61afSAnirudh Venkataramanan ice_service_task_schedule(pf); 3050cdedef59SAnirudh Venkataramanan ice_irq_dynamic_ena(hw, NULL, NULL); 3051940b61afSAnirudh Venkataramanan 3052940b61afSAnirudh Venkataramanan return ret; 3053940b61afSAnirudh Venkataramanan } 3054940b61afSAnirudh Venkataramanan 3055940b61afSAnirudh Venkataramanan /** 30560e04e8e1SBrett Creeley * ice_dis_ctrlq_interrupts - disable control queue interrupts 30570e04e8e1SBrett Creeley * @hw: pointer to HW structure 30580e04e8e1SBrett Creeley */ 30590e04e8e1SBrett Creeley static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) 30600e04e8e1SBrett Creeley { 30610e04e8e1SBrett Creeley /* disable Admin queue Interrupt causes */ 30620e04e8e1SBrett Creeley wr32(hw, PFINT_FW_CTL, 30630e04e8e1SBrett Creeley rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M); 30640e04e8e1SBrett Creeley 30650e04e8e1SBrett Creeley /* disable Mailbox queue Interrupt causes */ 30660e04e8e1SBrett Creeley wr32(hw, PFINT_MBX_CTL, 30670e04e8e1SBrett Creeley rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); 30680e04e8e1SBrett Creeley 30698f5ee3c4SJacob Keller wr32(hw, PFINT_SB_CTL, 30708f5ee3c4SJacob Keller rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M); 30718f5ee3c4SJacob Keller 30720e04e8e1SBrett Creeley /* disable Control queue Interrupt causes */ 30730e04e8e1SBrett Creeley wr32(hw, PFINT_OICR_CTL, 30740e04e8e1SBrett Creeley rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); 30750e04e8e1SBrett Creeley 30760e04e8e1SBrett Creeley ice_flush(hw); 
30770e04e8e1SBrett Creeley } 30780e04e8e1SBrett Creeley 30790e04e8e1SBrett Creeley /** 3080940b61afSAnirudh Venkataramanan * ice_free_irq_msix_misc - Unroll misc vector setup 3081940b61afSAnirudh Venkataramanan * @pf: board private structure 3082940b61afSAnirudh Venkataramanan */ 3083940b61afSAnirudh Venkataramanan static void ice_free_irq_msix_misc(struct ice_pf *pf) 3084940b61afSAnirudh Venkataramanan { 30850e04e8e1SBrett Creeley struct ice_hw *hw = &pf->hw; 30860e04e8e1SBrett Creeley 30870e04e8e1SBrett Creeley ice_dis_ctrlq_interrupts(hw); 30880e04e8e1SBrett Creeley 3089940b61afSAnirudh Venkataramanan /* disable OICR interrupt */ 30900e04e8e1SBrett Creeley wr32(hw, PFINT_OICR_ENA, 0); 30910e04e8e1SBrett Creeley ice_flush(hw); 3092940b61afSAnirudh Venkataramanan 3093ba880734SBrett Creeley if (pf->msix_entries) { 3094cbe66bfeSBrett Creeley synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); 30954015d11eSBrett Creeley devm_free_irq(ice_pf_to_dev(pf), 3096cbe66bfeSBrett Creeley pf->msix_entries[pf->oicr_idx].vector, pf); 3097940b61afSAnirudh Venkataramanan } 3098940b61afSAnirudh Venkataramanan 3099eb0208ecSPreethi Banala pf->num_avail_sw_msix += 1; 3100cbe66bfeSBrett Creeley ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); 3101940b61afSAnirudh Venkataramanan } 3102940b61afSAnirudh Venkataramanan 3103940b61afSAnirudh Venkataramanan /** 31040e04e8e1SBrett Creeley * ice_ena_ctrlq_interrupts - enable control queue interrupts 31050e04e8e1SBrett Creeley * @hw: pointer to HW structure 3106b07833a0SBrett Creeley * @reg_idx: HW vector index to associate the control queue interrupts with 31070e04e8e1SBrett Creeley */ 3108b07833a0SBrett Creeley static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) 31090e04e8e1SBrett Creeley { 31100e04e8e1SBrett Creeley u32 val; 31110e04e8e1SBrett Creeley 3112b07833a0SBrett Creeley val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) | 31130e04e8e1SBrett Creeley PFINT_OICR_CTL_CAUSE_ENA_M); 31140e04e8e1SBrett 
Creeley wr32(hw, PFINT_OICR_CTL, val); 31150e04e8e1SBrett Creeley 31160e04e8e1SBrett Creeley /* enable Admin queue Interrupt causes */ 3117b07833a0SBrett Creeley val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) | 31180e04e8e1SBrett Creeley PFINT_FW_CTL_CAUSE_ENA_M); 31190e04e8e1SBrett Creeley wr32(hw, PFINT_FW_CTL, val); 31200e04e8e1SBrett Creeley 31210e04e8e1SBrett Creeley /* enable Mailbox queue Interrupt causes */ 3122b07833a0SBrett Creeley val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | 31230e04e8e1SBrett Creeley PFINT_MBX_CTL_CAUSE_ENA_M); 31240e04e8e1SBrett Creeley wr32(hw, PFINT_MBX_CTL, val); 31250e04e8e1SBrett Creeley 31268f5ee3c4SJacob Keller /* This enables Sideband queue Interrupt causes */ 31278f5ee3c4SJacob Keller val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | 31288f5ee3c4SJacob Keller PFINT_SB_CTL_CAUSE_ENA_M); 31298f5ee3c4SJacob Keller wr32(hw, PFINT_SB_CTL, val); 31308f5ee3c4SJacob Keller 31310e04e8e1SBrett Creeley ice_flush(hw); 31320e04e8e1SBrett Creeley } 31330e04e8e1SBrett Creeley 31340e04e8e1SBrett Creeley /** 3135940b61afSAnirudh Venkataramanan * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events 3136940b61afSAnirudh Venkataramanan * @pf: board private structure 3137940b61afSAnirudh Venkataramanan * 3138940b61afSAnirudh Venkataramanan * This sets up the handler for MSIX 0, which is used to manage the 3139940b61afSAnirudh Venkataramanan * non-queue interrupts, e.g. AdminQ and errors. This is not used 3140940b61afSAnirudh Venkataramanan * when in MSI or Legacy interrupt mode. 
3141940b61afSAnirudh Venkataramanan */ 3142940b61afSAnirudh Venkataramanan static int ice_req_irq_msix_misc(struct ice_pf *pf) 3143940b61afSAnirudh Venkataramanan { 31444015d11eSBrett Creeley struct device *dev = ice_pf_to_dev(pf); 3145940b61afSAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 3146940b61afSAnirudh Venkataramanan int oicr_idx, err = 0; 3147940b61afSAnirudh Venkataramanan 3148940b61afSAnirudh Venkataramanan if (!pf->int_name[0]) 3149940b61afSAnirudh Venkataramanan snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", 31504015d11eSBrett Creeley dev_driver_string(dev), dev_name(dev)); 3151940b61afSAnirudh Venkataramanan 31520b28b702SAnirudh Venkataramanan /* Do not request IRQ but do enable OICR interrupt since settings are 31530b28b702SAnirudh Venkataramanan * lost during reset. Note that this function is called only during 31540b28b702SAnirudh Venkataramanan * rebuild path and not while reset is in progress. 31550b28b702SAnirudh Venkataramanan */ 31565df7e45dSDave Ertman if (ice_is_reset_in_progress(pf->state)) 31570b28b702SAnirudh Venkataramanan goto skip_req_irq; 31580b28b702SAnirudh Venkataramanan 3159cbe66bfeSBrett Creeley /* reserve one vector in irq_tracker for misc interrupts */ 3160cbe66bfeSBrett Creeley oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); 3161940b61afSAnirudh Venkataramanan if (oicr_idx < 0) 3162940b61afSAnirudh Venkataramanan return oicr_idx; 3163940b61afSAnirudh Venkataramanan 3164eb0208ecSPreethi Banala pf->num_avail_sw_msix -= 1; 316588865fc4SKarol Kolacinski pf->oicr_idx = (u16)oicr_idx; 3166940b61afSAnirudh Venkataramanan 31674015d11eSBrett Creeley err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector, 3168940b61afSAnirudh Venkataramanan ice_misc_intr, 0, pf->int_name, pf); 3169940b61afSAnirudh Venkataramanan if (err) { 31704015d11eSBrett Creeley dev_err(dev, "devm_request_irq for %s failed: %d\n", 3171940b61afSAnirudh Venkataramanan pf->int_name, err); 3172cbe66bfeSBrett Creeley 
ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); 3173eb0208ecSPreethi Banala pf->num_avail_sw_msix += 1; 3174940b61afSAnirudh Venkataramanan return err; 3175940b61afSAnirudh Venkataramanan } 3176940b61afSAnirudh Venkataramanan 31770b28b702SAnirudh Venkataramanan skip_req_irq: 3178940b61afSAnirudh Venkataramanan ice_ena_misc_vector(pf); 3179940b61afSAnirudh Venkataramanan 3180cbe66bfeSBrett Creeley ice_ena_ctrlq_interrupts(hw, pf->oicr_idx); 3181cbe66bfeSBrett Creeley wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), 318263f545edSBrett Creeley ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); 3183940b61afSAnirudh Venkataramanan 3184940b61afSAnirudh Venkataramanan ice_flush(hw); 3185cdedef59SAnirudh Venkataramanan ice_irq_dynamic_ena(hw, NULL, NULL); 3186940b61afSAnirudh Venkataramanan 3187940b61afSAnirudh Venkataramanan return 0; 3188940b61afSAnirudh Venkataramanan } 3189940b61afSAnirudh Venkataramanan 3190940b61afSAnirudh Venkataramanan /** 3191df0f8479SAnirudh Venkataramanan * ice_napi_add - register NAPI handler for the VSI 3192df0f8479SAnirudh Venkataramanan * @vsi: VSI for which NAPI handler is to be registered 3193df0f8479SAnirudh Venkataramanan * 3194df0f8479SAnirudh Venkataramanan * This function is only called in the driver's load path. Registering the NAPI 3195df0f8479SAnirudh Venkataramanan * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, 3196df0f8479SAnirudh Venkataramanan * reset/rebuild, etc.) 
3197df0f8479SAnirudh Venkataramanan */ 3198df0f8479SAnirudh Venkataramanan static void ice_napi_add(struct ice_vsi *vsi) 3199df0f8479SAnirudh Venkataramanan { 3200df0f8479SAnirudh Venkataramanan int v_idx; 3201df0f8479SAnirudh Venkataramanan 3202df0f8479SAnirudh Venkataramanan if (!vsi->netdev) 3203df0f8479SAnirudh Venkataramanan return; 3204df0f8479SAnirudh Venkataramanan 32050c2561c8SBrett Creeley ice_for_each_q_vector(vsi, v_idx) 3206df0f8479SAnirudh Venkataramanan netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, 3207df0f8479SAnirudh Venkataramanan ice_napi_poll, NAPI_POLL_WEIGHT); 3208df0f8479SAnirudh Venkataramanan } 3209df0f8479SAnirudh Venkataramanan 3210df0f8479SAnirudh Venkataramanan /** 3211462acf6aSTony Nguyen * ice_set_ops - set netdev and ethtools ops for the given netdev 3212462acf6aSTony Nguyen * @netdev: netdev instance 32133a858ba3SAnirudh Venkataramanan */ 3214462acf6aSTony Nguyen static void ice_set_ops(struct net_device *netdev) 32153a858ba3SAnirudh Venkataramanan { 3216462acf6aSTony Nguyen struct ice_pf *pf = ice_netdev_to_pf(netdev); 3217462acf6aSTony Nguyen 3218462acf6aSTony Nguyen if (ice_is_safe_mode(pf)) { 3219462acf6aSTony Nguyen netdev->netdev_ops = &ice_netdev_safe_mode_ops; 3220462acf6aSTony Nguyen ice_set_ethtool_safe_mode_ops(netdev); 3221462acf6aSTony Nguyen return; 3222462acf6aSTony Nguyen } 3223462acf6aSTony Nguyen 3224462acf6aSTony Nguyen netdev->netdev_ops = &ice_netdev_ops; 3225b20e6c17SJakub Kicinski netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; 3226462acf6aSTony Nguyen ice_set_ethtool_ops(netdev); 3227462acf6aSTony Nguyen } 3228462acf6aSTony Nguyen 3229462acf6aSTony Nguyen /** 3230462acf6aSTony Nguyen * ice_set_netdev_features - set features for the given netdev 3231462acf6aSTony Nguyen * @netdev: netdev instance 3232462acf6aSTony Nguyen */ 3233462acf6aSTony Nguyen static void ice_set_netdev_features(struct net_device *netdev) 3234462acf6aSTony Nguyen { 3235462acf6aSTony Nguyen struct ice_pf *pf = 
ice_netdev_to_pf(netdev); 3236d76a60baSAnirudh Venkataramanan netdev_features_t csumo_features; 3237d76a60baSAnirudh Venkataramanan netdev_features_t vlano_features; 3238d76a60baSAnirudh Venkataramanan netdev_features_t dflt_features; 3239d76a60baSAnirudh Venkataramanan netdev_features_t tso_features; 32403a858ba3SAnirudh Venkataramanan 3241462acf6aSTony Nguyen if (ice_is_safe_mode(pf)) { 3242462acf6aSTony Nguyen /* safe mode */ 3243462acf6aSTony Nguyen netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; 3244462acf6aSTony Nguyen netdev->hw_features = netdev->features; 3245462acf6aSTony Nguyen return; 3246462acf6aSTony Nguyen } 32473a858ba3SAnirudh Venkataramanan 3248d76a60baSAnirudh Venkataramanan dflt_features = NETIF_F_SG | 32493a858ba3SAnirudh Venkataramanan NETIF_F_HIGHDMA | 3250148beb61SHenry Tieman NETIF_F_NTUPLE | 32513a858ba3SAnirudh Venkataramanan NETIF_F_RXHASH; 32523a858ba3SAnirudh Venkataramanan 3253d76a60baSAnirudh Venkataramanan csumo_features = NETIF_F_RXCSUM | 3254d76a60baSAnirudh Venkataramanan NETIF_F_IP_CSUM | 3255cf909e19SAnirudh Venkataramanan NETIF_F_SCTP_CRC | 3256d76a60baSAnirudh Venkataramanan NETIF_F_IPV6_CSUM; 3257d76a60baSAnirudh Venkataramanan 3258d76a60baSAnirudh Venkataramanan vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | 3259d76a60baSAnirudh Venkataramanan NETIF_F_HW_VLAN_CTAG_TX | 3260d76a60baSAnirudh Venkataramanan NETIF_F_HW_VLAN_CTAG_RX; 3261d76a60baSAnirudh Venkataramanan 3262a54e3b8cSBrett Creeley tso_features = NETIF_F_TSO | 3263a4e82a81STony Nguyen NETIF_F_TSO_ECN | 3264a4e82a81STony Nguyen NETIF_F_TSO6 | 3265a4e82a81STony Nguyen NETIF_F_GSO_GRE | 3266a4e82a81STony Nguyen NETIF_F_GSO_UDP_TUNNEL | 3267a4e82a81STony Nguyen NETIF_F_GSO_GRE_CSUM | 3268a4e82a81STony Nguyen NETIF_F_GSO_UDP_TUNNEL_CSUM | 3269a4e82a81STony Nguyen NETIF_F_GSO_PARTIAL | 3270a4e82a81STony Nguyen NETIF_F_GSO_IPXIP4 | 3271a4e82a81STony Nguyen NETIF_F_GSO_IPXIP6 | 3272a54e3b8cSBrett Creeley NETIF_F_GSO_UDP_L4; 3273d76a60baSAnirudh Venkataramanan 
3274a4e82a81STony Nguyen netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | 3275a4e82a81STony Nguyen NETIF_F_GSO_GRE_CSUM; 3276d76a60baSAnirudh Venkataramanan /* set features that user can change */ 3277d76a60baSAnirudh Venkataramanan netdev->hw_features = dflt_features | csumo_features | 3278d76a60baSAnirudh Venkataramanan vlano_features | tso_features; 3279d76a60baSAnirudh Venkataramanan 3280a4e82a81STony Nguyen /* add support for HW_CSUM on packets with MPLS header */ 3281a4e82a81STony Nguyen netdev->mpls_features = NETIF_F_HW_CSUM; 3282a4e82a81STony Nguyen 32833a858ba3SAnirudh Venkataramanan /* enable features */ 32843a858ba3SAnirudh Venkataramanan netdev->features |= netdev->hw_features; 32850d08a441SKiran Patil 32860d08a441SKiran Patil netdev->hw_features |= NETIF_F_HW_TC; 32870d08a441SKiran Patil 3288d76a60baSAnirudh Venkataramanan /* encap and VLAN devices inherit default, csumo and tso features */ 3289d76a60baSAnirudh Venkataramanan netdev->hw_enc_features |= dflt_features | csumo_features | 3290d76a60baSAnirudh Venkataramanan tso_features; 3291d76a60baSAnirudh Venkataramanan netdev->vlan_features |= dflt_features | csumo_features | 3292d76a60baSAnirudh Venkataramanan tso_features; 3293462acf6aSTony Nguyen } 3294462acf6aSTony Nguyen 3295462acf6aSTony Nguyen /** 3296462acf6aSTony Nguyen * ice_cfg_netdev - Allocate, configure and register a netdev 3297462acf6aSTony Nguyen * @vsi: the VSI associated with the new netdev 3298462acf6aSTony Nguyen * 3299462acf6aSTony Nguyen * Returns 0 on success, negative value on failure 3300462acf6aSTony Nguyen */ 3301462acf6aSTony Nguyen static int ice_cfg_netdev(struct ice_vsi *vsi) 3302462acf6aSTony Nguyen { 3303462acf6aSTony Nguyen struct ice_netdev_priv *np; 3304462acf6aSTony Nguyen struct net_device *netdev; 3305462acf6aSTony Nguyen u8 mac_addr[ETH_ALEN]; 33061adf7eadSJacob Keller 3307462acf6aSTony Nguyen netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, 3308462acf6aSTony Nguyen vsi->alloc_rxq); 
33091e23f076SAnirudh Venkataramanan if (!netdev) 33101e23f076SAnirudh Venkataramanan return -ENOMEM; 3311462acf6aSTony Nguyen 3312a476d72aSAnirudh Venkataramanan set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 3313462acf6aSTony Nguyen vsi->netdev = netdev; 3314462acf6aSTony Nguyen np = netdev_priv(netdev); 3315462acf6aSTony Nguyen np->vsi = vsi; 3316462acf6aSTony Nguyen 3317462acf6aSTony Nguyen ice_set_netdev_features(netdev); 3318462acf6aSTony Nguyen 3319462acf6aSTony Nguyen ice_set_ops(netdev); 33203a858ba3SAnirudh Venkataramanan 33213a858ba3SAnirudh Venkataramanan if (vsi->type == ICE_VSI_PF) { 3322c73bf3bdSPaul M Stillwell Jr SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); 33233a858ba3SAnirudh Venkataramanan ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 3324f3956ebbSJakub Kicinski eth_hw_addr_set(netdev, mac_addr); 33253a858ba3SAnirudh Venkataramanan ether_addr_copy(netdev->perm_addr, mac_addr); 33263a858ba3SAnirudh Venkataramanan } 33273a858ba3SAnirudh Venkataramanan 33283a858ba3SAnirudh Venkataramanan netdev->priv_flags |= IFF_UNICAST_FLT; 33293a858ba3SAnirudh Venkataramanan 3330462acf6aSTony Nguyen /* Setup netdev TC information */ 3331462acf6aSTony Nguyen ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); 3332cdedef59SAnirudh Venkataramanan 33333a858ba3SAnirudh Venkataramanan /* setup watchdog timeout value to be 5 second */ 33343a858ba3SAnirudh Venkataramanan netdev->watchdog_timeo = 5 * HZ; 33353a858ba3SAnirudh Venkataramanan 33363a858ba3SAnirudh Venkataramanan netdev->min_mtu = ETH_MIN_MTU; 33373a858ba3SAnirudh Venkataramanan netdev->max_mtu = ICE_MAX_MTU; 33383a858ba3SAnirudh Venkataramanan 33393a858ba3SAnirudh Venkataramanan return 0; 33403a858ba3SAnirudh Venkataramanan } 33413a858ba3SAnirudh Venkataramanan 33423a858ba3SAnirudh Venkataramanan /** 3343d76a60baSAnirudh Venkataramanan * ice_fill_rss_lut - Fill the RSS lookup table with default values 3344d76a60baSAnirudh Venkataramanan * @lut: Lookup table 3345d76a60baSAnirudh Venkataramanan * 
@rss_table_size: Lookup table size 3346d76a60baSAnirudh Venkataramanan * @rss_size: Range of queue number for hashing 3347d76a60baSAnirudh Venkataramanan */ 3348d76a60baSAnirudh Venkataramanan void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) 3349d76a60baSAnirudh Venkataramanan { 3350d76a60baSAnirudh Venkataramanan u16 i; 3351d76a60baSAnirudh Venkataramanan 3352d76a60baSAnirudh Venkataramanan for (i = 0; i < rss_table_size; i++) 3353d76a60baSAnirudh Venkataramanan lut[i] = i % rss_size; 3354d76a60baSAnirudh Venkataramanan } 3355d76a60baSAnirudh Venkataramanan 3356d76a60baSAnirudh Venkataramanan /** 33570f9d5027SAnirudh Venkataramanan * ice_pf_vsi_setup - Set up a PF VSI 33580f9d5027SAnirudh Venkataramanan * @pf: board private structure 33590f9d5027SAnirudh Venkataramanan * @pi: pointer to the port_info instance 33600f9d5027SAnirudh Venkataramanan * 33610e674aebSAnirudh Venkataramanan * Returns pointer to the successfully allocated VSI software struct 33620e674aebSAnirudh Venkataramanan * on success, otherwise returns NULL on failure. 
33630f9d5027SAnirudh Venkataramanan */ 33640f9d5027SAnirudh Venkataramanan static struct ice_vsi * 33650f9d5027SAnirudh Venkataramanan ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 33660f9d5027SAnirudh Venkataramanan { 33670754d65bSKiran Patil return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL); 33680f9d5027SAnirudh Venkataramanan } 33690f9d5027SAnirudh Venkataramanan 3370fbc7b27aSKiran Patil static struct ice_vsi * 3371fbc7b27aSKiran Patil ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, 3372fbc7b27aSKiran Patil struct ice_channel *ch) 3373fbc7b27aSKiran Patil { 3374fbc7b27aSKiran Patil return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch); 3375fbc7b27aSKiran Patil } 3376fbc7b27aSKiran Patil 33770f9d5027SAnirudh Venkataramanan /** 3378148beb61SHenry Tieman * ice_ctrl_vsi_setup - Set up a control VSI 3379148beb61SHenry Tieman * @pf: board private structure 3380148beb61SHenry Tieman * @pi: pointer to the port_info instance 3381148beb61SHenry Tieman * 3382148beb61SHenry Tieman * Returns pointer to the successfully allocated VSI software struct 3383148beb61SHenry Tieman * on success, otherwise returns NULL on failure. 
3384148beb61SHenry Tieman */ 3385148beb61SHenry Tieman static struct ice_vsi * 3386148beb61SHenry Tieman ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3387148beb61SHenry Tieman { 33880754d65bSKiran Patil return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL); 3389148beb61SHenry Tieman } 3390148beb61SHenry Tieman 3391148beb61SHenry Tieman /** 33920e674aebSAnirudh Venkataramanan * ice_lb_vsi_setup - Set up a loopback VSI 33930e674aebSAnirudh Venkataramanan * @pf: board private structure 33940e674aebSAnirudh Venkataramanan * @pi: pointer to the port_info instance 33950e674aebSAnirudh Venkataramanan * 33960e674aebSAnirudh Venkataramanan * Returns pointer to the successfully allocated VSI software struct 33970e674aebSAnirudh Venkataramanan * on success, otherwise returns NULL on failure. 33980e674aebSAnirudh Venkataramanan */ 33990e674aebSAnirudh Venkataramanan struct ice_vsi * 34000e674aebSAnirudh Venkataramanan ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 34010e674aebSAnirudh Venkataramanan { 34020754d65bSKiran Patil return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL); 34030e674aebSAnirudh Venkataramanan } 34040e674aebSAnirudh Venkataramanan 34050e674aebSAnirudh Venkataramanan /** 3406f9867df6SAnirudh Venkataramanan * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload 3407d76a60baSAnirudh Venkataramanan * @netdev: network interface to be adjusted 3408d76a60baSAnirudh Venkataramanan * @proto: unused protocol 3409f9867df6SAnirudh Venkataramanan * @vid: VLAN ID to be added 3410d76a60baSAnirudh Venkataramanan * 3411f9867df6SAnirudh Venkataramanan * net_device_ops implementation for adding VLAN IDs 3412d76a60baSAnirudh Venkataramanan */ 3413c8b7abddSBruce Allan static int 3414c8b7abddSBruce Allan ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, 3415c8b7abddSBruce Allan u16 vid) 3416d76a60baSAnirudh Venkataramanan { 3417d76a60baSAnirudh Venkataramanan struct ice_netdev_priv *np = 
netdev_priv(netdev); 3418d76a60baSAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 34195eda8afdSAkeem G Abodunrin int ret; 3420d76a60baSAnirudh Venkataramanan 342142f3efefSBrett Creeley /* VLAN 0 is added by default during load/reset */ 342242f3efefSBrett Creeley if (!vid) 342342f3efefSBrett Creeley return 0; 342442f3efefSBrett Creeley 342542f3efefSBrett Creeley /* Enable VLAN pruning when a VLAN other than 0 is added */ 342642f3efefSBrett Creeley if (!ice_vsi_is_vlan_pruning_ena(vsi)) { 342729e71f41SBrett Creeley ret = ice_cfg_vlan_pruning(vsi, true); 34284f74dcc1SBrett Creeley if (ret) 34294f74dcc1SBrett Creeley return ret; 34304f74dcc1SBrett Creeley } 34314f74dcc1SBrett Creeley 343242f3efefSBrett Creeley /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged 343342f3efefSBrett Creeley * packets aren't pruned by the device's internal switch on Rx 3434d76a60baSAnirudh Venkataramanan */ 34351b8f15b6SMichal Swiatkowski ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI); 3436bcf68ea1SNick Nunley if (!ret) 3437e97fb1aeSAnirudh Venkataramanan set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); 34385eda8afdSAkeem G Abodunrin 34395eda8afdSAkeem G Abodunrin return ret; 3440d76a60baSAnirudh Venkataramanan } 3441d76a60baSAnirudh Venkataramanan 3442d76a60baSAnirudh Venkataramanan /** 3443f9867df6SAnirudh Venkataramanan * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload 3444d76a60baSAnirudh Venkataramanan * @netdev: network interface to be adjusted 3445d76a60baSAnirudh Venkataramanan * @proto: unused protocol 3446f9867df6SAnirudh Venkataramanan * @vid: VLAN ID to be removed 3447d76a60baSAnirudh Venkataramanan * 3448f9867df6SAnirudh Venkataramanan * net_device_ops implementation for removing VLAN IDs 3449d76a60baSAnirudh Venkataramanan */ 3450c8b7abddSBruce Allan static int 3451c8b7abddSBruce Allan ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, 3452c8b7abddSBruce Allan u16 vid) 3453d76a60baSAnirudh Venkataramanan { 
3454d76a60baSAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 3455d76a60baSAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 34565eda8afdSAkeem G Abodunrin int ret; 3457d76a60baSAnirudh Venkataramanan 345842f3efefSBrett Creeley /* don't allow removal of VLAN 0 */ 345942f3efefSBrett Creeley if (!vid) 346042f3efefSBrett Creeley return 0; 346142f3efefSBrett Creeley 34624f74dcc1SBrett Creeley /* Make sure ice_vsi_kill_vlan is successful before updating VLAN 34634f74dcc1SBrett Creeley * information 3464d76a60baSAnirudh Venkataramanan */ 34655eda8afdSAkeem G Abodunrin ret = ice_vsi_kill_vlan(vsi, vid); 34665eda8afdSAkeem G Abodunrin if (ret) 34675eda8afdSAkeem G Abodunrin return ret; 3468d76a60baSAnirudh Venkataramanan 346942f3efefSBrett Creeley /* Disable pruning when VLAN 0 is the only VLAN rule */ 347042f3efefSBrett Creeley if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi)) 347129e71f41SBrett Creeley ret = ice_cfg_vlan_pruning(vsi, false); 34724f74dcc1SBrett Creeley 3473e97fb1aeSAnirudh Venkataramanan set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); 34745eda8afdSAkeem G Abodunrin return ret; 3475d76a60baSAnirudh Venkataramanan } 3476d76a60baSAnirudh Venkataramanan 3477d76a60baSAnirudh Venkataramanan /** 3478195bb48fSMichal Swiatkowski * ice_rep_indr_tc_block_unbind 3479195bb48fSMichal Swiatkowski * @cb_priv: indirection block private data 3480195bb48fSMichal Swiatkowski */ 3481195bb48fSMichal Swiatkowski static void ice_rep_indr_tc_block_unbind(void *cb_priv) 3482195bb48fSMichal Swiatkowski { 3483195bb48fSMichal Swiatkowski struct ice_indr_block_priv *indr_priv = cb_priv; 3484195bb48fSMichal Swiatkowski 3485195bb48fSMichal Swiatkowski list_del(&indr_priv->list); 3486195bb48fSMichal Swiatkowski kfree(indr_priv); 3487195bb48fSMichal Swiatkowski } 3488195bb48fSMichal Swiatkowski 3489195bb48fSMichal Swiatkowski /** 3490195bb48fSMichal Swiatkowski * ice_tc_indir_block_unregister - Unregister TC indirect block notifications 
3491195bb48fSMichal Swiatkowski * @vsi: VSI struct which has the netdev 3492195bb48fSMichal Swiatkowski */ 3493195bb48fSMichal Swiatkowski static void ice_tc_indir_block_unregister(struct ice_vsi *vsi) 3494195bb48fSMichal Swiatkowski { 3495195bb48fSMichal Swiatkowski struct ice_netdev_priv *np = netdev_priv(vsi->netdev); 3496195bb48fSMichal Swiatkowski 3497195bb48fSMichal Swiatkowski flow_indr_dev_unregister(ice_indr_setup_tc_cb, np, 3498195bb48fSMichal Swiatkowski ice_rep_indr_tc_block_unbind); 3499195bb48fSMichal Swiatkowski } 3500195bb48fSMichal Swiatkowski 3501195bb48fSMichal Swiatkowski /** 3502195bb48fSMichal Swiatkowski * ice_tc_indir_block_remove - clean indirect TC block notifications 3503195bb48fSMichal Swiatkowski * @pf: PF structure 3504195bb48fSMichal Swiatkowski */ 3505195bb48fSMichal Swiatkowski static void ice_tc_indir_block_remove(struct ice_pf *pf) 3506195bb48fSMichal Swiatkowski { 3507195bb48fSMichal Swiatkowski struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); 3508195bb48fSMichal Swiatkowski 3509195bb48fSMichal Swiatkowski if (!pf_vsi) 3510195bb48fSMichal Swiatkowski return; 3511195bb48fSMichal Swiatkowski 3512195bb48fSMichal Swiatkowski ice_tc_indir_block_unregister(pf_vsi); 3513195bb48fSMichal Swiatkowski } 3514195bb48fSMichal Swiatkowski 3515195bb48fSMichal Swiatkowski /** 3516195bb48fSMichal Swiatkowski * ice_tc_indir_block_register - Register TC indirect block notifications 3517195bb48fSMichal Swiatkowski * @vsi: VSI struct which has the netdev 3518195bb48fSMichal Swiatkowski * 3519195bb48fSMichal Swiatkowski * Returns 0 on success, negative value on failure 3520195bb48fSMichal Swiatkowski */ 3521195bb48fSMichal Swiatkowski static int ice_tc_indir_block_register(struct ice_vsi *vsi) 3522195bb48fSMichal Swiatkowski { 3523195bb48fSMichal Swiatkowski struct ice_netdev_priv *np; 3524195bb48fSMichal Swiatkowski 3525195bb48fSMichal Swiatkowski if (!vsi || !vsi->netdev) 3526195bb48fSMichal Swiatkowski return -EINVAL; 3527195bb48fSMichal Swiatkowski 
3528195bb48fSMichal Swiatkowski np = netdev_priv(vsi->netdev); 3529195bb48fSMichal Swiatkowski 3530195bb48fSMichal Swiatkowski INIT_LIST_HEAD(&np->tc_indr_block_priv_list); 3531195bb48fSMichal Swiatkowski return flow_indr_dev_register(ice_indr_setup_tc_cb, np); 3532195bb48fSMichal Swiatkowski } 3533195bb48fSMichal Swiatkowski 3534195bb48fSMichal Swiatkowski /** 35353a858ba3SAnirudh Venkataramanan * ice_setup_pf_sw - Setup the HW switch on startup or after reset 35363a858ba3SAnirudh Venkataramanan * @pf: board private structure 35373a858ba3SAnirudh Venkataramanan * 35383a858ba3SAnirudh Venkataramanan * Returns 0 on success, negative value on failure 35393a858ba3SAnirudh Venkataramanan */ 35403a858ba3SAnirudh Venkataramanan static int ice_setup_pf_sw(struct ice_pf *pf) 35413a858ba3SAnirudh Venkataramanan { 3542195bb48fSMichal Swiatkowski struct device *dev = ice_pf_to_dev(pf); 35433a858ba3SAnirudh Venkataramanan struct ice_vsi *vsi; 35442ccc1c1cSTony Nguyen int status; 35453a858ba3SAnirudh Venkataramanan 35465df7e45dSDave Ertman if (ice_is_reset_in_progress(pf->state)) 35470f9d5027SAnirudh Venkataramanan return -EBUSY; 35480f9d5027SAnirudh Venkataramanan 35490f9d5027SAnirudh Venkataramanan vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); 3550135f4b9eSJacob Keller if (!vsi) 3551135f4b9eSJacob Keller return -ENOMEM; 35523a858ba3SAnirudh Venkataramanan 3553fbc7b27aSKiran Patil /* init channel list */ 3554fbc7b27aSKiran Patil INIT_LIST_HEAD(&vsi->ch_list); 3555fbc7b27aSKiran Patil 3556df0f8479SAnirudh Venkataramanan status = ice_cfg_netdev(vsi); 3557c1484691STony Nguyen if (status) 3558df0f8479SAnirudh Venkataramanan goto unroll_vsi_setup; 3559efc2214bSMaciej Fijalkowski /* netdev has to be configured before setting frame size */ 3560efc2214bSMaciej Fijalkowski ice_vsi_cfg_frame_size(vsi); 3561df0f8479SAnirudh Venkataramanan 3562195bb48fSMichal Swiatkowski /* init indirect block notifications */ 3563195bb48fSMichal Swiatkowski status = ice_tc_indir_block_register(vsi); 
3564195bb48fSMichal Swiatkowski if (status) { 3565195bb48fSMichal Swiatkowski dev_err(dev, "Failed to register netdev notifier\n"); 3566195bb48fSMichal Swiatkowski goto unroll_cfg_netdev; 3567195bb48fSMichal Swiatkowski } 3568195bb48fSMichal Swiatkowski 3569b94b013eSDave Ertman /* Setup DCB netlink interface */ 3570b94b013eSDave Ertman ice_dcbnl_setup(vsi); 3571b94b013eSDave Ertman 3572df0f8479SAnirudh Venkataramanan /* registering the NAPI handler requires both the queues and 3573df0f8479SAnirudh Venkataramanan * netdev to be created, which are done in ice_pf_vsi_setup() 3574df0f8479SAnirudh Venkataramanan * and ice_cfg_netdev() respectively 3575df0f8479SAnirudh Venkataramanan */ 3576df0f8479SAnirudh Venkataramanan ice_napi_add(vsi); 3577df0f8479SAnirudh Venkataramanan 357828bf2672SBrett Creeley status = ice_set_cpu_rx_rmap(vsi); 357928bf2672SBrett Creeley if (status) { 3580195bb48fSMichal Swiatkowski dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n", 358128bf2672SBrett Creeley vsi->vsi_num, status); 358228bf2672SBrett Creeley goto unroll_napi_add; 358328bf2672SBrett Creeley } 3584561f4379STony Nguyen status = ice_init_mac_fltr(pf); 35859daf8208SAnirudh Venkataramanan if (status) 358628bf2672SBrett Creeley goto free_cpu_rx_map; 35879daf8208SAnirudh Venkataramanan 35882ccc1c1cSTony Nguyen return 0; 35899daf8208SAnirudh Venkataramanan 359028bf2672SBrett Creeley free_cpu_rx_map: 359128bf2672SBrett Creeley ice_free_cpu_rx_rmap(vsi); 3592df0f8479SAnirudh Venkataramanan unroll_napi_add: 3593195bb48fSMichal Swiatkowski ice_tc_indir_block_unregister(vsi); 3594195bb48fSMichal Swiatkowski unroll_cfg_netdev: 35953a858ba3SAnirudh Venkataramanan if (vsi) { 3596df0f8479SAnirudh Venkataramanan ice_napi_del(vsi); 35973a858ba3SAnirudh Venkataramanan if (vsi->netdev) { 3598a476d72aSAnirudh Venkataramanan clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 35993a858ba3SAnirudh Venkataramanan free_netdev(vsi->netdev); 36003a858ba3SAnirudh Venkataramanan vsi->netdev = NULL; 
36013a858ba3SAnirudh Venkataramanan } 3602df0f8479SAnirudh Venkataramanan } 36039daf8208SAnirudh Venkataramanan 3604df0f8479SAnirudh Venkataramanan unroll_vsi_setup: 3605135f4b9eSJacob Keller ice_vsi_release(vsi); 36063a858ba3SAnirudh Venkataramanan return status; 36073a858ba3SAnirudh Venkataramanan } 36083a858ba3SAnirudh Venkataramanan 36093a858ba3SAnirudh Venkataramanan /** 36108c243700SAnirudh Venkataramanan * ice_get_avail_q_count - Get count of queues in use 36118c243700SAnirudh Venkataramanan * @pf_qmap: bitmap to get queue use count from 36128c243700SAnirudh Venkataramanan * @lock: pointer to a mutex that protects access to pf_qmap 36138c243700SAnirudh Venkataramanan * @size: size of the bitmap 3614940b61afSAnirudh Venkataramanan */ 36158c243700SAnirudh Venkataramanan static u16 36168c243700SAnirudh Venkataramanan ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) 3617940b61afSAnirudh Venkataramanan { 361888865fc4SKarol Kolacinski unsigned long bit; 361988865fc4SKarol Kolacinski u16 count = 0; 3620940b61afSAnirudh Venkataramanan 36218c243700SAnirudh Venkataramanan mutex_lock(lock); 36228c243700SAnirudh Venkataramanan for_each_clear_bit(bit, pf_qmap, size) 36238c243700SAnirudh Venkataramanan count++; 36248c243700SAnirudh Venkataramanan mutex_unlock(lock); 3625940b61afSAnirudh Venkataramanan 36268c243700SAnirudh Venkataramanan return count; 36278c243700SAnirudh Venkataramanan } 3628d76a60baSAnirudh Venkataramanan 36298c243700SAnirudh Venkataramanan /** 36308c243700SAnirudh Venkataramanan * ice_get_avail_txq_count - Get count of Tx queues in use 36318c243700SAnirudh Venkataramanan * @pf: pointer to an ice_pf instance 36328c243700SAnirudh Venkataramanan */ 36338c243700SAnirudh Venkataramanan u16 ice_get_avail_txq_count(struct ice_pf *pf) 36348c243700SAnirudh Venkataramanan { 36358c243700SAnirudh Venkataramanan return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, 36368c243700SAnirudh Venkataramanan pf->max_pf_txqs); 
36378c243700SAnirudh Venkataramanan } 3638940b61afSAnirudh Venkataramanan 36398c243700SAnirudh Venkataramanan /** 36408c243700SAnirudh Venkataramanan * ice_get_avail_rxq_count - Get count of Rx queues in use 36418c243700SAnirudh Venkataramanan * @pf: pointer to an ice_pf instance 36428c243700SAnirudh Venkataramanan */ 36438c243700SAnirudh Venkataramanan u16 ice_get_avail_rxq_count(struct ice_pf *pf) 36448c243700SAnirudh Venkataramanan { 36458c243700SAnirudh Venkataramanan return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, 36468c243700SAnirudh Venkataramanan pf->max_pf_rxqs); 3647940b61afSAnirudh Venkataramanan } 3648940b61afSAnirudh Venkataramanan 3649940b61afSAnirudh Venkataramanan /** 3650940b61afSAnirudh Venkataramanan * ice_deinit_pf - Unrolls initialziations done by ice_init_pf 3651940b61afSAnirudh Venkataramanan * @pf: board private structure to initialize 3652940b61afSAnirudh Venkataramanan */ 3653940b61afSAnirudh Venkataramanan static void ice_deinit_pf(struct ice_pf *pf) 3654940b61afSAnirudh Venkataramanan { 36558d81fa55SAkeem G Abodunrin ice_service_task_stop(pf); 3656940b61afSAnirudh Venkataramanan mutex_destroy(&pf->sw_mutex); 3657b94b013eSDave Ertman mutex_destroy(&pf->tc_mutex); 3658940b61afSAnirudh Venkataramanan mutex_destroy(&pf->avail_q_mutex); 365978b5713aSAnirudh Venkataramanan 366078b5713aSAnirudh Venkataramanan if (pf->avail_txqs) { 366178b5713aSAnirudh Venkataramanan bitmap_free(pf->avail_txqs); 366278b5713aSAnirudh Venkataramanan pf->avail_txqs = NULL; 366378b5713aSAnirudh Venkataramanan } 366478b5713aSAnirudh Venkataramanan 366578b5713aSAnirudh Venkataramanan if (pf->avail_rxqs) { 366678b5713aSAnirudh Venkataramanan bitmap_free(pf->avail_rxqs); 366778b5713aSAnirudh Venkataramanan pf->avail_rxqs = NULL; 366878b5713aSAnirudh Venkataramanan } 366906c16d89SJacob Keller 367006c16d89SJacob Keller if (pf->ptp.clock) 367106c16d89SJacob Keller ptp_clock_unregister(pf->ptp.clock); 3672940b61afSAnirudh Venkataramanan } 
3673940b61afSAnirudh Venkataramanan 3674940b61afSAnirudh Venkataramanan /** 3675462acf6aSTony Nguyen * ice_set_pf_caps - set PFs capability flags 3676462acf6aSTony Nguyen * @pf: pointer to the PF instance 3677462acf6aSTony Nguyen */ 3678462acf6aSTony Nguyen static void ice_set_pf_caps(struct ice_pf *pf) 3679462acf6aSTony Nguyen { 3680462acf6aSTony Nguyen struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; 3681462acf6aSTony Nguyen 3682d25a0fc4SDave Ertman clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3683d25a0fc4SDave Ertman clear_bit(ICE_FLAG_AUX_ENA, pf->flags); 3684d25a0fc4SDave Ertman if (func_caps->common_cap.rdma) { 3685d25a0fc4SDave Ertman set_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3686d25a0fc4SDave Ertman set_bit(ICE_FLAG_AUX_ENA, pf->flags); 3687d25a0fc4SDave Ertman } 3688462acf6aSTony Nguyen clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 3689462acf6aSTony Nguyen if (func_caps->common_cap.dcb) 3690462acf6aSTony Nguyen set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 3691462acf6aSTony Nguyen clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 3692462acf6aSTony Nguyen if (func_caps->common_cap.sr_iov_1_1) { 3693462acf6aSTony Nguyen set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 3694462acf6aSTony Nguyen pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs, 3695462acf6aSTony Nguyen ICE_MAX_VF_COUNT); 3696462acf6aSTony Nguyen } 3697462acf6aSTony Nguyen clear_bit(ICE_FLAG_RSS_ENA, pf->flags); 3698462acf6aSTony Nguyen if (func_caps->common_cap.rss_table_size) 3699462acf6aSTony Nguyen set_bit(ICE_FLAG_RSS_ENA, pf->flags); 3700462acf6aSTony Nguyen 3701148beb61SHenry Tieman clear_bit(ICE_FLAG_FD_ENA, pf->flags); 3702148beb61SHenry Tieman if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { 3703148beb61SHenry Tieman u16 unused; 3704148beb61SHenry Tieman 3705148beb61SHenry Tieman /* ctrl_vsi_idx will be set to a valid value when flow director 3706148beb61SHenry Tieman * is setup by ice_init_fdir 3707148beb61SHenry Tieman */ 3708148beb61SHenry Tieman 
pf->ctrl_vsi_idx = ICE_NO_VSI; 3709148beb61SHenry Tieman set_bit(ICE_FLAG_FD_ENA, pf->flags); 3710148beb61SHenry Tieman /* force guaranteed filter pool for PF */ 3711148beb61SHenry Tieman ice_alloc_fd_guar_item(&pf->hw, &unused, 3712148beb61SHenry Tieman func_caps->fd_fltr_guar); 3713148beb61SHenry Tieman /* force shared filter pool for PF */ 3714148beb61SHenry Tieman ice_alloc_fd_shrd_item(&pf->hw, &unused, 3715148beb61SHenry Tieman func_caps->fd_fltr_best_effort); 3716148beb61SHenry Tieman } 3717148beb61SHenry Tieman 371806c16d89SJacob Keller clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 371906c16d89SJacob Keller if (func_caps->common_cap.ieee_1588) 372006c16d89SJacob Keller set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 372106c16d89SJacob Keller 3722462acf6aSTony Nguyen pf->max_pf_txqs = func_caps->common_cap.num_txq; 3723462acf6aSTony Nguyen pf->max_pf_rxqs = func_caps->common_cap.num_rxq; 3724462acf6aSTony Nguyen } 3725462acf6aSTony Nguyen 3726462acf6aSTony Nguyen /** 3727940b61afSAnirudh Venkataramanan * ice_init_pf - Initialize general software structures (struct ice_pf) 3728940b61afSAnirudh Venkataramanan * @pf: board private structure to initialize 3729940b61afSAnirudh Venkataramanan */ 373078b5713aSAnirudh Venkataramanan static int ice_init_pf(struct ice_pf *pf) 3731940b61afSAnirudh Venkataramanan { 3732462acf6aSTony Nguyen ice_set_pf_caps(pf); 3733940b61afSAnirudh Venkataramanan 3734940b61afSAnirudh Venkataramanan mutex_init(&pf->sw_mutex); 3735b94b013eSDave Ertman mutex_init(&pf->tc_mutex); 3736d76a60baSAnirudh Venkataramanan 3737d69ea414SJacob Keller INIT_HLIST_HEAD(&pf->aq_wait_list); 3738d69ea414SJacob Keller spin_lock_init(&pf->aq_wait_lock); 3739d69ea414SJacob Keller init_waitqueue_head(&pf->aq_wait_queue); 3740d69ea414SJacob Keller 37411c08052eSJacob Keller init_waitqueue_head(&pf->reset_wait_queue); 37421c08052eSJacob Keller 3743940b61afSAnirudh Venkataramanan /* setup service timer and periodic service task */ 3744940b61afSAnirudh Venkataramanan 
timer_setup(&pf->serv_tmr, ice_service_timer, 0); 3745940b61afSAnirudh Venkataramanan pf->serv_tmr_period = HZ; 3746940b61afSAnirudh Venkataramanan INIT_WORK(&pf->serv_task, ice_service_task); 37477e408e07SAnirudh Venkataramanan clear_bit(ICE_SERVICE_SCHED, pf->state); 374878b5713aSAnirudh Venkataramanan 3749462acf6aSTony Nguyen mutex_init(&pf->avail_q_mutex); 375078b5713aSAnirudh Venkataramanan pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); 375178b5713aSAnirudh Venkataramanan if (!pf->avail_txqs) 375278b5713aSAnirudh Venkataramanan return -ENOMEM; 375378b5713aSAnirudh Venkataramanan 375478b5713aSAnirudh Venkataramanan pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); 375578b5713aSAnirudh Venkataramanan if (!pf->avail_rxqs) { 37564015d11eSBrett Creeley devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs); 375778b5713aSAnirudh Venkataramanan pf->avail_txqs = NULL; 375878b5713aSAnirudh Venkataramanan return -ENOMEM; 375978b5713aSAnirudh Venkataramanan } 376078b5713aSAnirudh Venkataramanan 376178b5713aSAnirudh Venkataramanan return 0; 3762940b61afSAnirudh Venkataramanan } 3763940b61afSAnirudh Venkataramanan 3764940b61afSAnirudh Venkataramanan /** 3765940b61afSAnirudh Venkataramanan * ice_ena_msix_range - Request a range of MSIX vectors from the OS 3766940b61afSAnirudh Venkataramanan * @pf: board private structure 3767940b61afSAnirudh Venkataramanan * 3768940b61afSAnirudh Venkataramanan * compute the number of MSIX vectors required (v_budget) and request from 3769940b61afSAnirudh Venkataramanan * the OS. 
Return the number of vectors reserved or negative on failure 3770940b61afSAnirudh Venkataramanan */ 3771940b61afSAnirudh Venkataramanan static int ice_ena_msix_range(struct ice_pf *pf) 3772940b61afSAnirudh Venkataramanan { 3773d25a0fc4SDave Ertman int num_cpus, v_left, v_actual, v_other, v_budget = 0; 37744015d11eSBrett Creeley struct device *dev = ice_pf_to_dev(pf); 3775940b61afSAnirudh Venkataramanan int needed, err, i; 3776940b61afSAnirudh Venkataramanan 3777940b61afSAnirudh Venkataramanan v_left = pf->hw.func_caps.common_cap.num_msix_vectors; 3778d25a0fc4SDave Ertman num_cpus = num_online_cpus(); 3779940b61afSAnirudh Venkataramanan 3780741106f7STony Nguyen /* reserve for LAN miscellaneous handler */ 3781741106f7STony Nguyen needed = ICE_MIN_LAN_OICR_MSIX; 3782152b978aSAnirudh Venkataramanan if (v_left < needed) 3783152b978aSAnirudh Venkataramanan goto no_hw_vecs_left_err; 3784940b61afSAnirudh Venkataramanan v_budget += needed; 3785940b61afSAnirudh Venkataramanan v_left -= needed; 3786940b61afSAnirudh Venkataramanan 3787741106f7STony Nguyen /* reserve for flow director */ 3788741106f7STony Nguyen if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 3789741106f7STony Nguyen needed = ICE_FDIR_MSIX; 3790741106f7STony Nguyen if (v_left < needed) 3791741106f7STony Nguyen goto no_hw_vecs_left_err; 3792741106f7STony Nguyen v_budget += needed; 3793741106f7STony Nguyen v_left -= needed; 3794741106f7STony Nguyen } 3795741106f7STony Nguyen 3796f66756e0SGrzegorz Nitka /* reserve for switchdev */ 3797f66756e0SGrzegorz Nitka needed = ICE_ESWITCH_MSIX; 3798f66756e0SGrzegorz Nitka if (v_left < needed) 3799f66756e0SGrzegorz Nitka goto no_hw_vecs_left_err; 3800f66756e0SGrzegorz Nitka v_budget += needed; 3801f66756e0SGrzegorz Nitka v_left -= needed; 3802f66756e0SGrzegorz Nitka 3803741106f7STony Nguyen /* total used for non-traffic vectors */ 3804741106f7STony Nguyen v_other = v_budget; 3805741106f7STony Nguyen 3806940b61afSAnirudh Venkataramanan /* reserve vectors for LAN traffic */ 
3807d25a0fc4SDave Ertman needed = num_cpus; 3808152b978aSAnirudh Venkataramanan if (v_left < needed) 3809152b978aSAnirudh Venkataramanan goto no_hw_vecs_left_err; 3810152b978aSAnirudh Venkataramanan pf->num_lan_msix = needed; 3811152b978aSAnirudh Venkataramanan v_budget += needed; 3812152b978aSAnirudh Venkataramanan v_left -= needed; 3813940b61afSAnirudh Venkataramanan 3814d25a0fc4SDave Ertman /* reserve vectors for RDMA auxiliary driver */ 3815d25a0fc4SDave Ertman if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { 3816d25a0fc4SDave Ertman needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; 3817d25a0fc4SDave Ertman if (v_left < needed) 3818d25a0fc4SDave Ertman goto no_hw_vecs_left_err; 3819d25a0fc4SDave Ertman pf->num_rdma_msix = needed; 3820d25a0fc4SDave Ertman v_budget += needed; 3821d25a0fc4SDave Ertman v_left -= needed; 3822d25a0fc4SDave Ertman } 3823d25a0fc4SDave Ertman 38244015d11eSBrett Creeley pf->msix_entries = devm_kcalloc(dev, v_budget, 3825c6dfd690SBruce Allan sizeof(*pf->msix_entries), GFP_KERNEL); 3826940b61afSAnirudh Venkataramanan if (!pf->msix_entries) { 3827940b61afSAnirudh Venkataramanan err = -ENOMEM; 3828940b61afSAnirudh Venkataramanan goto exit_err; 3829940b61afSAnirudh Venkataramanan } 3830940b61afSAnirudh Venkataramanan 3831940b61afSAnirudh Venkataramanan for (i = 0; i < v_budget; i++) 3832940b61afSAnirudh Venkataramanan pf->msix_entries[i].entry = i; 3833940b61afSAnirudh Venkataramanan 3834940b61afSAnirudh Venkataramanan /* actually reserve the vectors */ 3835940b61afSAnirudh Venkataramanan v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, 3836940b61afSAnirudh Venkataramanan ICE_MIN_MSIX, v_budget); 3837940b61afSAnirudh Venkataramanan if (v_actual < 0) { 38384015d11eSBrett Creeley dev_err(dev, "unable to reserve MSI-X vectors\n"); 3839940b61afSAnirudh Venkataramanan err = v_actual; 3840940b61afSAnirudh Venkataramanan goto msix_err; 3841940b61afSAnirudh Venkataramanan } 3842940b61afSAnirudh Venkataramanan 3843940b61afSAnirudh Venkataramanan if 
(v_actual < v_budget) { 384419cce2c6SAnirudh Venkataramanan dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", 3845940b61afSAnirudh Venkataramanan v_budget, v_actual); 3846152b978aSAnirudh Venkataramanan 3847f3fe97f6SBrett Creeley if (v_actual < ICE_MIN_MSIX) { 3848152b978aSAnirudh Venkataramanan /* error if we can't get minimum vectors */ 3849940b61afSAnirudh Venkataramanan pci_disable_msix(pf->pdev); 3850940b61afSAnirudh Venkataramanan err = -ERANGE; 3851940b61afSAnirudh Venkataramanan goto msix_err; 3852152b978aSAnirudh Venkataramanan } else { 3853d25a0fc4SDave Ertman int v_remain = v_actual - v_other; 3854d25a0fc4SDave Ertman int v_rdma = 0, v_min_rdma = 0; 3855d25a0fc4SDave Ertman 3856d25a0fc4SDave Ertman if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { 3857d25a0fc4SDave Ertman /* Need at least 1 interrupt in addition to 3858d25a0fc4SDave Ertman * AEQ MSIX 3859d25a0fc4SDave Ertman */ 3860d25a0fc4SDave Ertman v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; 3861d25a0fc4SDave Ertman v_min_rdma = ICE_MIN_RDMA_MSIX; 3862d25a0fc4SDave Ertman } 3863741106f7STony Nguyen 3864741106f7STony Nguyen if (v_actual == ICE_MIN_MSIX || 3865d25a0fc4SDave Ertman v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) { 3866d25a0fc4SDave Ertman dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n"); 3867d25a0fc4SDave Ertman clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3868d25a0fc4SDave Ertman 3869d25a0fc4SDave Ertman pf->num_rdma_msix = 0; 3870f3fe97f6SBrett Creeley pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; 3871d25a0fc4SDave Ertman } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || 3872d25a0fc4SDave Ertman (v_remain - v_rdma < v_rdma)) { 3873d25a0fc4SDave Ertman /* Support minimum RDMA and give remaining 3874d25a0fc4SDave Ertman * vectors to LAN MSIX 3875d25a0fc4SDave Ertman */ 3876d25a0fc4SDave Ertman pf->num_rdma_msix = v_min_rdma; 3877d25a0fc4SDave Ertman pf->num_lan_msix = v_remain - v_min_rdma; 3878d25a0fc4SDave Ertman } else { 3879d25a0fc4SDave Ertman /* Split 
remaining MSIX with RDMA after 3880d25a0fc4SDave Ertman * accounting for AEQ MSIX 3881d25a0fc4SDave Ertman */ 3882d25a0fc4SDave Ertman pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + 3883d25a0fc4SDave Ertman ICE_RDMA_NUM_AEQ_MSIX; 3884d25a0fc4SDave Ertman pf->num_lan_msix = v_remain - pf->num_rdma_msix; 3885d25a0fc4SDave Ertman } 3886741106f7STony Nguyen 3887741106f7STony Nguyen dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", 3888741106f7STony Nguyen pf->num_lan_msix); 3889d25a0fc4SDave Ertman 3890d25a0fc4SDave Ertman if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) 3891d25a0fc4SDave Ertman dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", 3892d25a0fc4SDave Ertman pf->num_rdma_msix); 3893940b61afSAnirudh Venkataramanan } 3894940b61afSAnirudh Venkataramanan } 3895940b61afSAnirudh Venkataramanan 3896940b61afSAnirudh Venkataramanan return v_actual; 3897940b61afSAnirudh Venkataramanan 3898940b61afSAnirudh Venkataramanan msix_err: 38994015d11eSBrett Creeley devm_kfree(dev, pf->msix_entries); 3900940b61afSAnirudh Venkataramanan goto exit_err; 3901940b61afSAnirudh Venkataramanan 3902152b978aSAnirudh Venkataramanan no_hw_vecs_left_err: 390319cce2c6SAnirudh Venkataramanan dev_err(dev, "not enough device MSI-X vectors. 
requested = %d, available = %d\n", 3904152b978aSAnirudh Venkataramanan needed, v_left); 3905152b978aSAnirudh Venkataramanan err = -ERANGE; 3906940b61afSAnirudh Venkataramanan exit_err: 3907d25a0fc4SDave Ertman pf->num_rdma_msix = 0; 3908940b61afSAnirudh Venkataramanan pf->num_lan_msix = 0; 3909940b61afSAnirudh Venkataramanan return err; 3910940b61afSAnirudh Venkataramanan } 3911940b61afSAnirudh Venkataramanan 3912940b61afSAnirudh Venkataramanan /** 3913940b61afSAnirudh Venkataramanan * ice_dis_msix - Disable MSI-X interrupt setup in OS 3914940b61afSAnirudh Venkataramanan * @pf: board private structure 3915940b61afSAnirudh Venkataramanan */ 3916940b61afSAnirudh Venkataramanan static void ice_dis_msix(struct ice_pf *pf) 3917940b61afSAnirudh Venkataramanan { 3918940b61afSAnirudh Venkataramanan pci_disable_msix(pf->pdev); 39194015d11eSBrett Creeley devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); 3920940b61afSAnirudh Venkataramanan pf->msix_entries = NULL; 3921940b61afSAnirudh Venkataramanan } 3922940b61afSAnirudh Venkataramanan 3923940b61afSAnirudh Venkataramanan /** 3924eb0208ecSPreethi Banala * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme 3925eb0208ecSPreethi Banala * @pf: board private structure 3926eb0208ecSPreethi Banala */ 3927eb0208ecSPreethi Banala static void ice_clear_interrupt_scheme(struct ice_pf *pf) 3928eb0208ecSPreethi Banala { 3929eb0208ecSPreethi Banala ice_dis_msix(pf); 3930eb0208ecSPreethi Banala 3931cbe66bfeSBrett Creeley if (pf->irq_tracker) { 39324015d11eSBrett Creeley devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); 3933cbe66bfeSBrett Creeley pf->irq_tracker = NULL; 3934eb0208ecSPreethi Banala } 3935eb0208ecSPreethi Banala } 3936eb0208ecSPreethi Banala 3937eb0208ecSPreethi Banala /** 3938940b61afSAnirudh Venkataramanan * ice_init_interrupt_scheme - Determine proper interrupt scheme 3939940b61afSAnirudh Venkataramanan * @pf: board private structure to initialize 3940940b61afSAnirudh Venkataramanan */ 
3941940b61afSAnirudh Venkataramanan static int ice_init_interrupt_scheme(struct ice_pf *pf) 3942940b61afSAnirudh Venkataramanan { 3943cbe66bfeSBrett Creeley int vectors; 3944940b61afSAnirudh Venkataramanan 3945940b61afSAnirudh Venkataramanan vectors = ice_ena_msix_range(pf); 3946940b61afSAnirudh Venkataramanan 3947940b61afSAnirudh Venkataramanan if (vectors < 0) 3948940b61afSAnirudh Venkataramanan return vectors; 3949940b61afSAnirudh Venkataramanan 3950940b61afSAnirudh Venkataramanan /* set up vector assignment tracking */ 3951e94c0df9SGustavo A. R. Silva pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf), 3952e94c0df9SGustavo A. R. Silva struct_size(pf->irq_tracker, list, vectors), 3953e94c0df9SGustavo A. R. Silva GFP_KERNEL); 3954cbe66bfeSBrett Creeley if (!pf->irq_tracker) { 3955940b61afSAnirudh Venkataramanan ice_dis_msix(pf); 3956940b61afSAnirudh Venkataramanan return -ENOMEM; 3957940b61afSAnirudh Venkataramanan } 3958940b61afSAnirudh Venkataramanan 3959eb0208ecSPreethi Banala /* populate SW interrupts pool with number of OS granted IRQs. */ 396088865fc4SKarol Kolacinski pf->num_avail_sw_msix = (u16)vectors; 396188865fc4SKarol Kolacinski pf->irq_tracker->num_entries = (u16)vectors; 3962cbe66bfeSBrett Creeley pf->irq_tracker->end = pf->irq_tracker->num_entries; 3963940b61afSAnirudh Venkataramanan 3964940b61afSAnirudh Venkataramanan return 0; 3965940b61afSAnirudh Venkataramanan } 3966940b61afSAnirudh Venkataramanan 3967940b61afSAnirudh Venkataramanan /** 396831765519SAnirudh Venkataramanan * ice_is_wol_supported - check if WoL is supported 396931765519SAnirudh Venkataramanan * @hw: pointer to hardware info 3970769c500dSAkeem G Abodunrin * 3971769c500dSAkeem G Abodunrin * Check if WoL is supported based on the HW configuration. 
3972769c500dSAkeem G Abodunrin * Returns true if NVM supports and enables WoL for this port, false otherwise 3973769c500dSAkeem G Abodunrin */ 397431765519SAnirudh Venkataramanan bool ice_is_wol_supported(struct ice_hw *hw) 3975769c500dSAkeem G Abodunrin { 3976769c500dSAkeem G Abodunrin u16 wol_ctrl; 3977769c500dSAkeem G Abodunrin 3978769c500dSAkeem G Abodunrin /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control 3979769c500dSAkeem G Abodunrin * word) indicates WoL is not supported on the corresponding PF ID. 3980769c500dSAkeem G Abodunrin */ 3981769c500dSAkeem G Abodunrin if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) 3982769c500dSAkeem G Abodunrin return false; 3983769c500dSAkeem G Abodunrin 398431765519SAnirudh Venkataramanan return !(BIT(hw->port_info->lport) & wol_ctrl); 3985769c500dSAkeem G Abodunrin } 3986769c500dSAkeem G Abodunrin 3987769c500dSAkeem G Abodunrin /** 398887324e74SHenry Tieman * ice_vsi_recfg_qs - Change the number of queues on a VSI 398987324e74SHenry Tieman * @vsi: VSI being changed 399087324e74SHenry Tieman * @new_rx: new number of Rx queues 399187324e74SHenry Tieman * @new_tx: new number of Tx queues 399287324e74SHenry Tieman * 399387324e74SHenry Tieman * Only change the number of queues if new_tx, or new_rx is non-0. 399487324e74SHenry Tieman * 399587324e74SHenry Tieman * Returns 0 on success. 
399687324e74SHenry Tieman */ 399787324e74SHenry Tieman int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) 399887324e74SHenry Tieman { 399987324e74SHenry Tieman struct ice_pf *pf = vsi->back; 400087324e74SHenry Tieman int err = 0, timeout = 50; 400187324e74SHenry Tieman 400287324e74SHenry Tieman if (!new_rx && !new_tx) 400387324e74SHenry Tieman return -EINVAL; 400487324e74SHenry Tieman 40057e408e07SAnirudh Venkataramanan while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { 400687324e74SHenry Tieman timeout--; 400787324e74SHenry Tieman if (!timeout) 400887324e74SHenry Tieman return -EBUSY; 400987324e74SHenry Tieman usleep_range(1000, 2000); 401087324e74SHenry Tieman } 401187324e74SHenry Tieman 401287324e74SHenry Tieman if (new_tx) 401388865fc4SKarol Kolacinski vsi->req_txq = (u16)new_tx; 401487324e74SHenry Tieman if (new_rx) 401588865fc4SKarol Kolacinski vsi->req_rxq = (u16)new_rx; 401687324e74SHenry Tieman 401787324e74SHenry Tieman /* set for the next time the netdev is started */ 401887324e74SHenry Tieman if (!netif_running(vsi->netdev)) { 401987324e74SHenry Tieman ice_vsi_rebuild(vsi, false); 402087324e74SHenry Tieman dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); 402187324e74SHenry Tieman goto done; 402287324e74SHenry Tieman } 402387324e74SHenry Tieman 402487324e74SHenry Tieman ice_vsi_close(vsi); 402587324e74SHenry Tieman ice_vsi_rebuild(vsi, false); 402687324e74SHenry Tieman ice_pf_dcb_recfg(pf); 402787324e74SHenry Tieman ice_vsi_open(vsi); 402887324e74SHenry Tieman done: 40297e408e07SAnirudh Venkataramanan clear_bit(ICE_CFG_BUSY, pf->state); 403087324e74SHenry Tieman return err; 403187324e74SHenry Tieman } 403287324e74SHenry Tieman 403387324e74SHenry Tieman /** 4034cd1f56f4SBrett Creeley * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode 4035cd1f56f4SBrett Creeley * @pf: PF to configure 4036cd1f56f4SBrett Creeley * 4037cd1f56f4SBrett Creeley * No VLAN 
offloads/filtering are advertised in safe mode so make sure the PF 4038cd1f56f4SBrett Creeley * VSI can still Tx/Rx VLAN tagged packets. 4039cd1f56f4SBrett Creeley */ 4040cd1f56f4SBrett Creeley static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) 4041cd1f56f4SBrett Creeley { 4042cd1f56f4SBrett Creeley struct ice_vsi *vsi = ice_get_main_vsi(pf); 4043cd1f56f4SBrett Creeley struct ice_vsi_ctx *ctxt; 4044cd1f56f4SBrett Creeley struct ice_hw *hw; 40455518ac2aSTony Nguyen int status; 4046cd1f56f4SBrett Creeley 4047cd1f56f4SBrett Creeley if (!vsi) 4048cd1f56f4SBrett Creeley return; 4049cd1f56f4SBrett Creeley 4050cd1f56f4SBrett Creeley ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 4051cd1f56f4SBrett Creeley if (!ctxt) 4052cd1f56f4SBrett Creeley return; 4053cd1f56f4SBrett Creeley 4054cd1f56f4SBrett Creeley hw = &pf->hw; 4055cd1f56f4SBrett Creeley ctxt->info = vsi->info; 4056cd1f56f4SBrett Creeley 4057cd1f56f4SBrett Creeley ctxt->info.valid_sections = 4058cd1f56f4SBrett Creeley cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | 4059cd1f56f4SBrett Creeley ICE_AQ_VSI_PROP_SECURITY_VALID | 4060cd1f56f4SBrett Creeley ICE_AQ_VSI_PROP_SW_VALID); 4061cd1f56f4SBrett Creeley 4062cd1f56f4SBrett Creeley /* disable VLAN anti-spoof */ 4063cd1f56f4SBrett Creeley ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 4064cd1f56f4SBrett Creeley ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 4065cd1f56f4SBrett Creeley 4066cd1f56f4SBrett Creeley /* disable VLAN pruning and keep all other settings */ 4067cd1f56f4SBrett Creeley ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 4068cd1f56f4SBrett Creeley 4069cd1f56f4SBrett Creeley /* allow all VLANs on Tx and don't strip on Rx */ 4070cd1f56f4SBrett Creeley ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL | 4071cd1f56f4SBrett Creeley ICE_AQ_VSI_VLAN_EMOD_NOTHING; 4072cd1f56f4SBrett Creeley 4073cd1f56f4SBrett Creeley status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 4074cd1f56f4SBrett Creeley if (status) { 40755f87ec48STony Nguyen 
dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", 40765518ac2aSTony Nguyen status, ice_aq_str(hw->adminq.sq_last_status)); 4077cd1f56f4SBrett Creeley } else { 4078cd1f56f4SBrett Creeley vsi->info.sec_flags = ctxt->info.sec_flags; 4079cd1f56f4SBrett Creeley vsi->info.sw_flags2 = ctxt->info.sw_flags2; 4080cd1f56f4SBrett Creeley vsi->info.vlan_flags = ctxt->info.vlan_flags; 4081cd1f56f4SBrett Creeley } 4082cd1f56f4SBrett Creeley 4083cd1f56f4SBrett Creeley kfree(ctxt); 4084cd1f56f4SBrett Creeley } 4085cd1f56f4SBrett Creeley 4086cd1f56f4SBrett Creeley /** 4087462acf6aSTony Nguyen * ice_log_pkg_init - log result of DDP package load 4088462acf6aSTony Nguyen * @hw: pointer to hardware info 4089247dd97dSWojciech Drewek * @state: state of package load 4090462acf6aSTony Nguyen */ 4091247dd97dSWojciech Drewek static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state) 4092462acf6aSTony Nguyen { 4093247dd97dSWojciech Drewek struct ice_pf *pf = hw->back; 4094247dd97dSWojciech Drewek struct device *dev; 4095462acf6aSTony Nguyen 4096247dd97dSWojciech Drewek dev = ice_pf_to_dev(pf); 4097247dd97dSWojciech Drewek 4098247dd97dSWojciech Drewek switch (state) { 4099247dd97dSWojciech Drewek case ICE_DDP_PKG_SUCCESS: 410019cce2c6SAnirudh Venkataramanan dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", 4101462acf6aSTony Nguyen hw->active_pkg_name, 4102462acf6aSTony Nguyen hw->active_pkg_ver.major, 4103462acf6aSTony Nguyen hw->active_pkg_ver.minor, 4104462acf6aSTony Nguyen hw->active_pkg_ver.update, 4105462acf6aSTony Nguyen hw->active_pkg_ver.draft); 4106247dd97dSWojciech Drewek break; 4107247dd97dSWojciech Drewek case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: 4108247dd97dSWojciech Drewek dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", 4109247dd97dSWojciech Drewek hw->active_pkg_name, 4110247dd97dSWojciech Drewek hw->active_pkg_ver.major, 4111247dd97dSWojciech 
Drewek hw->active_pkg_ver.minor, 4112247dd97dSWojciech Drewek hw->active_pkg_ver.update, 4113247dd97dSWojciech Drewek hw->active_pkg_ver.draft); 4114247dd97dSWojciech Drewek break; 4115247dd97dSWojciech Drewek case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED: 411619cce2c6SAnirudh Venkataramanan dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 4117462acf6aSTony Nguyen hw->active_pkg_name, 4118462acf6aSTony Nguyen hw->active_pkg_ver.major, 4119462acf6aSTony Nguyen hw->active_pkg_ver.minor, 4120462acf6aSTony Nguyen ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 4121247dd97dSWojciech Drewek break; 4122247dd97dSWojciech Drewek case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: 412319cce2c6SAnirudh Venkataramanan dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", 4124462acf6aSTony Nguyen hw->active_pkg_name, 4125462acf6aSTony Nguyen hw->active_pkg_ver.major, 4126462acf6aSTony Nguyen hw->active_pkg_ver.minor, 4127462acf6aSTony Nguyen hw->active_pkg_ver.update, 4128462acf6aSTony Nguyen hw->active_pkg_ver.draft, 4129462acf6aSTony Nguyen hw->pkg_name, 4130462acf6aSTony Nguyen hw->pkg_ver.major, 4131462acf6aSTony Nguyen hw->pkg_ver.minor, 4132462acf6aSTony Nguyen hw->pkg_ver.update, 4133462acf6aSTony Nguyen hw->pkg_ver.draft); 4134462acf6aSTony Nguyen break; 4135247dd97dSWojciech Drewek case ICE_DDP_PKG_FW_MISMATCH: 4136b8272919SVictor Raj dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); 4137b8272919SVictor Raj break; 4138247dd97dSWojciech Drewek case ICE_DDP_PKG_INVALID_FILE: 413919cce2c6SAnirudh Venkataramanan dev_err(dev, "The DDP package file is invalid. 
Entering Safe Mode.\n"); 4140462acf6aSTony Nguyen break; 4141247dd97dSWojciech Drewek case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH: 414219cce2c6SAnirudh Venkataramanan dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); 4143247dd97dSWojciech Drewek break; 4144247dd97dSWojciech Drewek case ICE_DDP_PKG_FILE_VERSION_TOO_LOW: 414519cce2c6SAnirudh Venkataramanan dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", 4146462acf6aSTony Nguyen ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 4147462acf6aSTony Nguyen break; 4148247dd97dSWojciech Drewek case ICE_DDP_PKG_FILE_SIGNATURE_INVALID: 414919cce2c6SAnirudh Venkataramanan dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); 4150247dd97dSWojciech Drewek break; 4151247dd97dSWojciech Drewek case ICE_DDP_PKG_FILE_REVISION_TOO_LOW: 415219cce2c6SAnirudh Venkataramanan dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); 4153247dd97dSWojciech Drewek break; 4154247dd97dSWojciech Drewek case ICE_DDP_PKG_LOAD_ERROR: 415519cce2c6SAnirudh Venkataramanan dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); 41569918f2d2SAnirudh Venkataramanan /* poll for reset to complete */ 41579918f2d2SAnirudh Venkataramanan if (ice_check_reset(hw)) 41589918f2d2SAnirudh Venkataramanan dev_err(dev, "Error resetting device. Please reload the driver\n"); 4159462acf6aSTony Nguyen break; 4160247dd97dSWojciech Drewek case ICE_DDP_PKG_ERR: 4161462acf6aSTony Nguyen default: 4162247dd97dSWojciech Drewek dev_err(dev, "An unknown error occurred when loading the DDP package. 
Entering Safe Mode.\n"); 41630092db5fSJesse Brandeburg break; 4164462acf6aSTony Nguyen } 4165462acf6aSTony Nguyen } 4166462acf6aSTony Nguyen 4167462acf6aSTony Nguyen /** 4168462acf6aSTony Nguyen * ice_load_pkg - load/reload the DDP Package file 4169462acf6aSTony Nguyen * @firmware: firmware structure when firmware requested or NULL for reload 4170462acf6aSTony Nguyen * @pf: pointer to the PF instance 4171462acf6aSTony Nguyen * 4172462acf6aSTony Nguyen * Called on probe and post CORER/GLOBR rebuild to load DDP Package and 4173462acf6aSTony Nguyen * initialize HW tables. 4174462acf6aSTony Nguyen */ 4175462acf6aSTony Nguyen static void 4176462acf6aSTony Nguyen ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) 4177462acf6aSTony Nguyen { 4178247dd97dSWojciech Drewek enum ice_ddp_state state = ICE_DDP_PKG_ERR; 41794015d11eSBrett Creeley struct device *dev = ice_pf_to_dev(pf); 4180462acf6aSTony Nguyen struct ice_hw *hw = &pf->hw; 4181462acf6aSTony Nguyen 4182462acf6aSTony Nguyen /* Load DDP Package */ 4183462acf6aSTony Nguyen if (firmware && !hw->pkg_copy) { 4184247dd97dSWojciech Drewek state = ice_copy_and_init_pkg(hw, firmware->data, 4185462acf6aSTony Nguyen firmware->size); 4186247dd97dSWojciech Drewek ice_log_pkg_init(hw, state); 4187462acf6aSTony Nguyen } else if (!firmware && hw->pkg_copy) { 4188462acf6aSTony Nguyen /* Reload package during rebuild after CORER/GLOBR reset */ 4189247dd97dSWojciech Drewek state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); 4190247dd97dSWojciech Drewek ice_log_pkg_init(hw, state); 4191462acf6aSTony Nguyen } else { 419219cce2c6SAnirudh Venkataramanan dev_err(dev, "The DDP package file failed to load. 
Entering Safe Mode.\n"); 4193462acf6aSTony Nguyen } 4194462acf6aSTony Nguyen 4195247dd97dSWojciech Drewek if (!ice_is_init_pkg_successful(state)) { 4196462acf6aSTony Nguyen /* Safe Mode */ 4197462acf6aSTony Nguyen clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 4198462acf6aSTony Nguyen return; 4199462acf6aSTony Nguyen } 4200462acf6aSTony Nguyen 4201462acf6aSTony Nguyen /* Successful download package is the precondition for advanced 4202462acf6aSTony Nguyen * features, hence setting the ICE_FLAG_ADV_FEATURES flag 4203462acf6aSTony Nguyen */ 4204462acf6aSTony Nguyen set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 4205462acf6aSTony Nguyen } 4206462acf6aSTony Nguyen 4207462acf6aSTony Nguyen /** 4208c585ea42SBrett Creeley * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines 4209c585ea42SBrett Creeley * @pf: pointer to the PF structure 4210c585ea42SBrett Creeley * 4211c585ea42SBrett Creeley * There is no error returned here because the driver should be able to handle 4212c585ea42SBrett Creeley * 128 Byte cache lines, so we only print a warning in case issues are seen, 4213c585ea42SBrett Creeley * specifically with Tx. 
4214c585ea42SBrett Creeley */ 4215c585ea42SBrett Creeley static void ice_verify_cacheline_size(struct ice_pf *pf) 4216c585ea42SBrett Creeley { 4217c585ea42SBrett Creeley if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) 421819cce2c6SAnirudh Venkataramanan dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", 4219c585ea42SBrett Creeley ICE_CACHE_LINE_BYTES); 4220c585ea42SBrett Creeley } 4221c585ea42SBrett Creeley 4222c585ea42SBrett Creeley /** 4223e3710a01SPaul M Stillwell Jr * ice_send_version - update firmware with driver version 4224e3710a01SPaul M Stillwell Jr * @pf: PF struct 4225e3710a01SPaul M Stillwell Jr * 4226d54699e2STony Nguyen * Returns 0 on success, else error code 4227e3710a01SPaul M Stillwell Jr */ 42285e24d598STony Nguyen static int ice_send_version(struct ice_pf *pf) 4229e3710a01SPaul M Stillwell Jr { 4230e3710a01SPaul M Stillwell Jr struct ice_driver_ver dv; 4231e3710a01SPaul M Stillwell Jr 423234a2a3b8SJeff Kirsher dv.major_ver = 0xff; 423334a2a3b8SJeff Kirsher dv.minor_ver = 0xff; 423434a2a3b8SJeff Kirsher dv.build_ver = 0xff; 4235e3710a01SPaul M Stillwell Jr dv.subbuild_ver = 0; 423634a2a3b8SJeff Kirsher strscpy((char *)dv.driver_string, UTS_RELEASE, 4237e3710a01SPaul M Stillwell Jr sizeof(dv.driver_string)); 4238e3710a01SPaul M Stillwell Jr return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); 4239e3710a01SPaul M Stillwell Jr } 4240e3710a01SPaul M Stillwell Jr 4241e3710a01SPaul M Stillwell Jr /** 4242148beb61SHenry Tieman * ice_init_fdir - Initialize flow director VSI and configuration 4243148beb61SHenry Tieman * @pf: pointer to the PF instance 4244148beb61SHenry Tieman * 4245148beb61SHenry Tieman * returns 0 on success, negative on error 4246148beb61SHenry Tieman */ 4247148beb61SHenry Tieman static int ice_init_fdir(struct ice_pf *pf) 4248148beb61SHenry Tieman { 4249148beb61SHenry Tieman struct device *dev = ice_pf_to_dev(pf); 4250148beb61SHenry Tieman struct ice_vsi *ctrl_vsi; 
4251148beb61SHenry Tieman int err; 4252148beb61SHenry Tieman 4253148beb61SHenry Tieman /* Side Band Flow Director needs to have a control VSI. 4254148beb61SHenry Tieman * Allocate it and store it in the PF. 4255148beb61SHenry Tieman */ 4256148beb61SHenry Tieman ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); 4257148beb61SHenry Tieman if (!ctrl_vsi) { 4258148beb61SHenry Tieman dev_dbg(dev, "could not create control VSI\n"); 4259148beb61SHenry Tieman return -ENOMEM; 4260148beb61SHenry Tieman } 4261148beb61SHenry Tieman 4262148beb61SHenry Tieman err = ice_vsi_open_ctrl(ctrl_vsi); 4263148beb61SHenry Tieman if (err) { 4264148beb61SHenry Tieman dev_dbg(dev, "could not open control VSI\n"); 4265148beb61SHenry Tieman goto err_vsi_open; 4266148beb61SHenry Tieman } 4267148beb61SHenry Tieman 4268148beb61SHenry Tieman mutex_init(&pf->hw.fdir_fltr_lock); 4269148beb61SHenry Tieman 4270148beb61SHenry Tieman err = ice_fdir_create_dflt_rules(pf); 4271148beb61SHenry Tieman if (err) 4272148beb61SHenry Tieman goto err_fdir_rule; 4273148beb61SHenry Tieman 4274148beb61SHenry Tieman return 0; 4275148beb61SHenry Tieman 4276148beb61SHenry Tieman err_fdir_rule: 4277148beb61SHenry Tieman ice_fdir_release_flows(&pf->hw); 4278148beb61SHenry Tieman ice_vsi_close(ctrl_vsi); 4279148beb61SHenry Tieman err_vsi_open: 4280148beb61SHenry Tieman ice_vsi_release(ctrl_vsi); 4281148beb61SHenry Tieman if (pf->ctrl_vsi_idx != ICE_NO_VSI) { 4282148beb61SHenry Tieman pf->vsi[pf->ctrl_vsi_idx] = NULL; 4283148beb61SHenry Tieman pf->ctrl_vsi_idx = ICE_NO_VSI; 4284148beb61SHenry Tieman } 4285148beb61SHenry Tieman return err; 4286148beb61SHenry Tieman } 4287148beb61SHenry Tieman 4288148beb61SHenry Tieman /** 4289462acf6aSTony Nguyen * ice_get_opt_fw_name - return optional firmware file name or NULL 4290462acf6aSTony Nguyen * @pf: pointer to the PF instance 4291462acf6aSTony Nguyen */ 4292462acf6aSTony Nguyen static char *ice_get_opt_fw_name(struct ice_pf *pf) 4293462acf6aSTony Nguyen { 4294462acf6aSTony 
Nguyen /* Optional firmware name same as default with additional dash 4295462acf6aSTony Nguyen * followed by a EUI-64 identifier (PCIe Device Serial Number) 4296462acf6aSTony Nguyen */ 4297462acf6aSTony Nguyen struct pci_dev *pdev = pf->pdev; 4298ceb2f007SJacob Keller char *opt_fw_filename; 4299ceb2f007SJacob Keller u64 dsn; 4300462acf6aSTony Nguyen 4301462acf6aSTony Nguyen /* Determine the name of the optional file using the DSN (two 4302462acf6aSTony Nguyen * dwords following the start of the DSN Capability). 4303462acf6aSTony Nguyen */ 4304ceb2f007SJacob Keller dsn = pci_get_dsn(pdev); 4305ceb2f007SJacob Keller if (!dsn) 4306ceb2f007SJacob Keller return NULL; 4307ceb2f007SJacob Keller 4308462acf6aSTony Nguyen opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); 4309462acf6aSTony Nguyen if (!opt_fw_filename) 4310462acf6aSTony Nguyen return NULL; 4311462acf6aSTony Nguyen 43121a9c561aSPaul M Stillwell Jr snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg", 4313ceb2f007SJacob Keller ICE_DDP_PKG_PATH, dsn); 4314462acf6aSTony Nguyen 4315462acf6aSTony Nguyen return opt_fw_filename; 4316462acf6aSTony Nguyen } 4317462acf6aSTony Nguyen 4318462acf6aSTony Nguyen /** 4319462acf6aSTony Nguyen * ice_request_fw - Device initialization routine 4320462acf6aSTony Nguyen * @pf: pointer to the PF instance 4321462acf6aSTony Nguyen */ 4322462acf6aSTony Nguyen static void ice_request_fw(struct ice_pf *pf) 4323462acf6aSTony Nguyen { 4324462acf6aSTony Nguyen char *opt_fw_filename = ice_get_opt_fw_name(pf); 4325462acf6aSTony Nguyen const struct firmware *firmware = NULL; 43264015d11eSBrett Creeley struct device *dev = ice_pf_to_dev(pf); 4327462acf6aSTony Nguyen int err = 0; 4328462acf6aSTony Nguyen 4329462acf6aSTony Nguyen /* optional device-specific DDP (if present) overrides the default DDP 4330462acf6aSTony Nguyen * package file. kernel logs a debug message if the file doesn't exist, 4331462acf6aSTony Nguyen * and warning messages for other errors. 
4332462acf6aSTony Nguyen */ 4333462acf6aSTony Nguyen if (opt_fw_filename) { 4334462acf6aSTony Nguyen err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); 4335462acf6aSTony Nguyen if (err) { 4336462acf6aSTony Nguyen kfree(opt_fw_filename); 4337462acf6aSTony Nguyen goto dflt_pkg_load; 4338462acf6aSTony Nguyen } 4339462acf6aSTony Nguyen 4340462acf6aSTony Nguyen /* request for firmware was successful. Download to device */ 4341462acf6aSTony Nguyen ice_load_pkg(firmware, pf); 4342462acf6aSTony Nguyen kfree(opt_fw_filename); 4343462acf6aSTony Nguyen release_firmware(firmware); 4344462acf6aSTony Nguyen return; 4345462acf6aSTony Nguyen } 4346462acf6aSTony Nguyen 4347462acf6aSTony Nguyen dflt_pkg_load: 4348462acf6aSTony Nguyen err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); 4349462acf6aSTony Nguyen if (err) { 435019cce2c6SAnirudh Venkataramanan dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); 4351462acf6aSTony Nguyen return; 4352462acf6aSTony Nguyen } 4353462acf6aSTony Nguyen 4354462acf6aSTony Nguyen /* request for firmware was successful. 
Download to device */ 4355462acf6aSTony Nguyen ice_load_pkg(firmware, pf); 4356462acf6aSTony Nguyen release_firmware(firmware); 4357462acf6aSTony Nguyen } 4358462acf6aSTony Nguyen 4359462acf6aSTony Nguyen /** 4360769c500dSAkeem G Abodunrin * ice_print_wake_reason - show the wake up cause in the log 4361769c500dSAkeem G Abodunrin * @pf: pointer to the PF struct 4362769c500dSAkeem G Abodunrin */ 4363769c500dSAkeem G Abodunrin static void ice_print_wake_reason(struct ice_pf *pf) 4364769c500dSAkeem G Abodunrin { 4365769c500dSAkeem G Abodunrin u32 wus = pf->wakeup_reason; 4366769c500dSAkeem G Abodunrin const char *wake_str; 4367769c500dSAkeem G Abodunrin 4368769c500dSAkeem G Abodunrin /* if no wake event, nothing to print */ 4369769c500dSAkeem G Abodunrin if (!wus) 4370769c500dSAkeem G Abodunrin return; 4371769c500dSAkeem G Abodunrin 4372769c500dSAkeem G Abodunrin if (wus & PFPM_WUS_LNKC_M) 4373769c500dSAkeem G Abodunrin wake_str = "Link\n"; 4374769c500dSAkeem G Abodunrin else if (wus & PFPM_WUS_MAG_M) 4375769c500dSAkeem G Abodunrin wake_str = "Magic Packet\n"; 4376769c500dSAkeem G Abodunrin else if (wus & PFPM_WUS_MNG_M) 4377769c500dSAkeem G Abodunrin wake_str = "Management\n"; 4378769c500dSAkeem G Abodunrin else if (wus & PFPM_WUS_FW_RST_WK_M) 4379769c500dSAkeem G Abodunrin wake_str = "Firmware Reset\n"; 4380769c500dSAkeem G Abodunrin else 4381769c500dSAkeem G Abodunrin wake_str = "Unknown\n"; 4382769c500dSAkeem G Abodunrin 4383769c500dSAkeem G Abodunrin dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); 4384769c500dSAkeem G Abodunrin } 4385769c500dSAkeem G Abodunrin 4386769c500dSAkeem G Abodunrin /** 43871e23f076SAnirudh Venkataramanan * ice_register_netdev - register netdev and devlink port 43881e23f076SAnirudh Venkataramanan * @pf: pointer to the PF struct 43891e23f076SAnirudh Venkataramanan */ 43901e23f076SAnirudh Venkataramanan static int ice_register_netdev(struct ice_pf *pf) 43911e23f076SAnirudh Venkataramanan { 43921e23f076SAnirudh Venkataramanan struct 
ice_vsi *vsi; 43931e23f076SAnirudh Venkataramanan int err = 0; 43941e23f076SAnirudh Venkataramanan 43951e23f076SAnirudh Venkataramanan vsi = ice_get_main_vsi(pf); 43961e23f076SAnirudh Venkataramanan if (!vsi || !vsi->netdev) 43971e23f076SAnirudh Venkataramanan return -EIO; 43981e23f076SAnirudh Venkataramanan 43991e23f076SAnirudh Venkataramanan err = register_netdev(vsi->netdev); 44001e23f076SAnirudh Venkataramanan if (err) 44011e23f076SAnirudh Venkataramanan goto err_register_netdev; 44021e23f076SAnirudh Venkataramanan 4403a476d72aSAnirudh Venkataramanan set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 44041e23f076SAnirudh Venkataramanan netif_carrier_off(vsi->netdev); 44051e23f076SAnirudh Venkataramanan netif_tx_stop_all_queues(vsi->netdev); 44062ae0aa47SWojciech Drewek err = ice_devlink_create_pf_port(pf); 44071e23f076SAnirudh Venkataramanan if (err) 44081e23f076SAnirudh Venkataramanan goto err_devlink_create; 44091e23f076SAnirudh Venkataramanan 44102ae0aa47SWojciech Drewek devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev); 44111e23f076SAnirudh Venkataramanan 44121e23f076SAnirudh Venkataramanan return 0; 44131e23f076SAnirudh Venkataramanan err_devlink_create: 44141e23f076SAnirudh Venkataramanan unregister_netdev(vsi->netdev); 4415a476d72aSAnirudh Venkataramanan clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 44161e23f076SAnirudh Venkataramanan err_register_netdev: 44171e23f076SAnirudh Venkataramanan free_netdev(vsi->netdev); 44181e23f076SAnirudh Venkataramanan vsi->netdev = NULL; 4419a476d72aSAnirudh Venkataramanan clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 44201e23f076SAnirudh Venkataramanan return err; 44211e23f076SAnirudh Venkataramanan } 44221e23f076SAnirudh Venkataramanan 44231e23f076SAnirudh Venkataramanan /** 4424837f08fdSAnirudh Venkataramanan * ice_probe - Device initialization routine 4425837f08fdSAnirudh Venkataramanan * @pdev: PCI device information struct 4426837f08fdSAnirudh Venkataramanan * @ent: entry in ice_pci_tbl 
4427837f08fdSAnirudh Venkataramanan * 4428837f08fdSAnirudh Venkataramanan * Returns 0 on success, negative on failure 4429837f08fdSAnirudh Venkataramanan */ 4430c8b7abddSBruce Allan static int 4431c8b7abddSBruce Allan ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) 4432837f08fdSAnirudh Venkataramanan { 443377ed84f4SBruce Allan struct device *dev = &pdev->dev; 4434837f08fdSAnirudh Venkataramanan struct ice_pf *pf; 4435837f08fdSAnirudh Venkataramanan struct ice_hw *hw; 4436b20e6c17SJakub Kicinski int i, err; 4437837f08fdSAnirudh Venkataramanan 443850ac7479SAnirudh Venkataramanan if (pdev->is_virtfn) { 443950ac7479SAnirudh Venkataramanan dev_err(dev, "can't probe a virtual function\n"); 444050ac7479SAnirudh Venkataramanan return -EINVAL; 444150ac7479SAnirudh Venkataramanan } 444250ac7479SAnirudh Venkataramanan 44434ee656bbSTony Nguyen /* this driver uses devres, see 44444ee656bbSTony Nguyen * Documentation/driver-api/driver-model/devres.rst 44454ee656bbSTony Nguyen */ 4446837f08fdSAnirudh Venkataramanan err = pcim_enable_device(pdev); 4447837f08fdSAnirudh Venkataramanan if (err) 4448837f08fdSAnirudh Venkataramanan return err; 4449837f08fdSAnirudh Venkataramanan 445080ad6ddeSJesse Brandeburg err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); 4451837f08fdSAnirudh Venkataramanan if (err) { 445277ed84f4SBruce Allan dev_err(dev, "BAR0 I/O map error %d\n", err); 4453837f08fdSAnirudh Venkataramanan return err; 4454837f08fdSAnirudh Venkataramanan } 4455837f08fdSAnirudh Venkataramanan 44561adf7eadSJacob Keller pf = ice_allocate_pf(dev); 4457837f08fdSAnirudh Venkataramanan if (!pf) 4458837f08fdSAnirudh Venkataramanan return -ENOMEM; 4459837f08fdSAnirudh Venkataramanan 446073e30a62SDave Ertman /* initialize Auxiliary index to invalid value */ 446173e30a62SDave Ertman pf->aux_idx = -1; 446273e30a62SDave Ertman 44632f2da36eSAnirudh Venkataramanan /* set up for high or low DMA */ 446477ed84f4SBruce Allan err = 
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 4465837f08fdSAnirudh Venkataramanan if (err) 446677ed84f4SBruce Allan err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 4467837f08fdSAnirudh Venkataramanan if (err) { 446877ed84f4SBruce Allan dev_err(dev, "DMA configuration failed: 0x%x\n", err); 4469837f08fdSAnirudh Venkataramanan return err; 4470837f08fdSAnirudh Venkataramanan } 4471837f08fdSAnirudh Venkataramanan 4472837f08fdSAnirudh Venkataramanan pci_enable_pcie_error_reporting(pdev); 4473837f08fdSAnirudh Venkataramanan pci_set_master(pdev); 4474837f08fdSAnirudh Venkataramanan 4475837f08fdSAnirudh Venkataramanan pf->pdev = pdev; 4476837f08fdSAnirudh Venkataramanan pci_set_drvdata(pdev, pf); 44777e408e07SAnirudh Venkataramanan set_bit(ICE_DOWN, pf->state); 44788d81fa55SAkeem G Abodunrin /* Disable service task until DOWN bit is cleared */ 44797e408e07SAnirudh Venkataramanan set_bit(ICE_SERVICE_DIS, pf->state); 4480837f08fdSAnirudh Venkataramanan 4481837f08fdSAnirudh Venkataramanan hw = &pf->hw; 4482837f08fdSAnirudh Venkataramanan hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; 44834e56802eSMichal Swiatkowski pci_save_state(pdev); 44844e56802eSMichal Swiatkowski 4485837f08fdSAnirudh Venkataramanan hw->back = pf; 4486837f08fdSAnirudh Venkataramanan hw->vendor_id = pdev->vendor; 4487837f08fdSAnirudh Venkataramanan hw->device_id = pdev->device; 4488837f08fdSAnirudh Venkataramanan pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 4489837f08fdSAnirudh Venkataramanan hw->subsystem_vendor_id = pdev->subsystem_vendor; 4490837f08fdSAnirudh Venkataramanan hw->subsystem_device_id = pdev->subsystem_device; 4491837f08fdSAnirudh Venkataramanan hw->bus.device = PCI_SLOT(pdev->devfn); 4492837f08fdSAnirudh Venkataramanan hw->bus.func = PCI_FUNC(pdev->devfn); 4493f31e4b6fSAnirudh Venkataramanan ice_set_ctrlq_len(hw); 4494f31e4b6fSAnirudh Venkataramanan 4495837f08fdSAnirudh Venkataramanan pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); 
4496837f08fdSAnirudh Venkataramanan 44977ec59eeaSAnirudh Venkataramanan #ifndef CONFIG_DYNAMIC_DEBUG 44987ec59eeaSAnirudh Venkataramanan if (debug < -1) 44997ec59eeaSAnirudh Venkataramanan hw->debug_mask = debug; 45007ec59eeaSAnirudh Venkataramanan #endif 45017ec59eeaSAnirudh Venkataramanan 4502f31e4b6fSAnirudh Venkataramanan err = ice_init_hw(hw); 4503f31e4b6fSAnirudh Venkataramanan if (err) { 450477ed84f4SBruce Allan dev_err(dev, "ice_init_hw failed: %d\n", err); 4505f31e4b6fSAnirudh Venkataramanan err = -EIO; 4506f31e4b6fSAnirudh Venkataramanan goto err_exit_unroll; 4507f31e4b6fSAnirudh Venkataramanan } 4508f31e4b6fSAnirudh Venkataramanan 450940b24760SAnirudh Venkataramanan ice_init_feature_support(pf); 451040b24760SAnirudh Venkataramanan 4511462acf6aSTony Nguyen ice_request_fw(pf); 4512462acf6aSTony Nguyen 4513462acf6aSTony Nguyen /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be 4514462acf6aSTony Nguyen * set in pf->state, which will cause ice_is_safe_mode to return 4515462acf6aSTony Nguyen * true 4516462acf6aSTony Nguyen */ 4517462acf6aSTony Nguyen if (ice_is_safe_mode(pf)) { 4518462acf6aSTony Nguyen /* we already got function/device capabilities but these don't 4519462acf6aSTony Nguyen * reflect what the driver needs to do in safe mode. Instead of 4520462acf6aSTony Nguyen * adding conditional logic everywhere to ignore these 4521462acf6aSTony Nguyen * device/function capabilities, override them. 
4522462acf6aSTony Nguyen */ 4523462acf6aSTony Nguyen ice_set_safe_mode_caps(hw); 4524462acf6aSTony Nguyen } 4525462acf6aSTony Nguyen 452678b5713aSAnirudh Venkataramanan err = ice_init_pf(pf); 452778b5713aSAnirudh Venkataramanan if (err) { 452878b5713aSAnirudh Venkataramanan dev_err(dev, "ice_init_pf failed: %d\n", err); 452978b5713aSAnirudh Venkataramanan goto err_init_pf_unroll; 453078b5713aSAnirudh Venkataramanan } 4531940b61afSAnirudh Venkataramanan 4532dce730f1SJacob Keller ice_devlink_init_regions(pf); 4533dce730f1SJacob Keller 4534b20e6c17SJakub Kicinski pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; 4535b20e6c17SJakub Kicinski pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; 4536b20e6c17SJakub Kicinski pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; 4537b20e6c17SJakub Kicinski pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; 4538b20e6c17SJakub Kicinski i = 0; 4539b20e6c17SJakub Kicinski if (pf->hw.tnl.valid_count[TNL_VXLAN]) { 4540b20e6c17SJakub Kicinski pf->hw.udp_tunnel_nic.tables[i].n_entries = 4541b20e6c17SJakub Kicinski pf->hw.tnl.valid_count[TNL_VXLAN]; 4542b20e6c17SJakub Kicinski pf->hw.udp_tunnel_nic.tables[i].tunnel_types = 4543b20e6c17SJakub Kicinski UDP_TUNNEL_TYPE_VXLAN; 4544b20e6c17SJakub Kicinski i++; 4545b20e6c17SJakub Kicinski } 4546b20e6c17SJakub Kicinski if (pf->hw.tnl.valid_count[TNL_GENEVE]) { 4547b20e6c17SJakub Kicinski pf->hw.udp_tunnel_nic.tables[i].n_entries = 4548b20e6c17SJakub Kicinski pf->hw.tnl.valid_count[TNL_GENEVE]; 4549b20e6c17SJakub Kicinski pf->hw.udp_tunnel_nic.tables[i].tunnel_types = 4550b20e6c17SJakub Kicinski UDP_TUNNEL_TYPE_GENEVE; 4551b20e6c17SJakub Kicinski i++; 4552b20e6c17SJakub Kicinski } 4553b20e6c17SJakub Kicinski 4554995c90f2SAnirudh Venkataramanan pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; 4555940b61afSAnirudh Venkataramanan if (!pf->num_alloc_vsi) { 4556940b61afSAnirudh Venkataramanan err = -EIO; 4557940b61afSAnirudh Venkataramanan goto err_init_pf_unroll; 
4558940b61afSAnirudh Venkataramanan } 4559b20e6c17SJakub Kicinski if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { 4560b20e6c17SJakub Kicinski dev_warn(&pf->pdev->dev, 4561b20e6c17SJakub Kicinski "limiting the VSI count due to UDP tunnel limitation %d > %d\n", 4562b20e6c17SJakub Kicinski pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); 4563b20e6c17SJakub Kicinski pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; 4564b20e6c17SJakub Kicinski } 4565940b61afSAnirudh Venkataramanan 456677ed84f4SBruce Allan pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), 456777ed84f4SBruce Allan GFP_KERNEL); 4568940b61afSAnirudh Venkataramanan if (!pf->vsi) { 4569940b61afSAnirudh Venkataramanan err = -ENOMEM; 4570940b61afSAnirudh Venkataramanan goto err_init_pf_unroll; 4571940b61afSAnirudh Venkataramanan } 4572940b61afSAnirudh Venkataramanan 4573940b61afSAnirudh Venkataramanan err = ice_init_interrupt_scheme(pf); 4574940b61afSAnirudh Venkataramanan if (err) { 457577ed84f4SBruce Allan dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); 4576940b61afSAnirudh Venkataramanan err = -EIO; 4577bc3a0241SJacob Keller goto err_init_vsi_unroll; 4578940b61afSAnirudh Venkataramanan } 4579940b61afSAnirudh Venkataramanan 4580940b61afSAnirudh Venkataramanan /* In case of MSIX we are going to setup the misc vector right here 4581940b61afSAnirudh Venkataramanan * to handle admin queue events etc. In case of legacy and MSI 4582940b61afSAnirudh Venkataramanan * the misc functionality and queue processing is combined in 4583940b61afSAnirudh Venkataramanan * the same vector and that gets setup at open. 
4584940b61afSAnirudh Venkataramanan */ 4585940b61afSAnirudh Venkataramanan err = ice_req_irq_msix_misc(pf); 4586940b61afSAnirudh Venkataramanan if (err) { 458777ed84f4SBruce Allan dev_err(dev, "setup of misc vector failed: %d\n", err); 4588940b61afSAnirudh Venkataramanan goto err_init_interrupt_unroll; 4589940b61afSAnirudh Venkataramanan } 4590940b61afSAnirudh Venkataramanan 4591940b61afSAnirudh Venkataramanan /* create switch struct for the switch element created by FW on boot */ 459277ed84f4SBruce Allan pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); 4593940b61afSAnirudh Venkataramanan if (!pf->first_sw) { 4594940b61afSAnirudh Venkataramanan err = -ENOMEM; 4595940b61afSAnirudh Venkataramanan goto err_msix_misc_unroll; 4596940b61afSAnirudh Venkataramanan } 4597940b61afSAnirudh Venkataramanan 4598b1edc14aSMd Fahad Iqbal Polash if (hw->evb_veb) 4599940b61afSAnirudh Venkataramanan pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; 4600b1edc14aSMd Fahad Iqbal Polash else 4601b1edc14aSMd Fahad Iqbal Polash pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; 4602b1edc14aSMd Fahad Iqbal Polash 4603940b61afSAnirudh Venkataramanan pf->first_sw->pf = pf; 4604940b61afSAnirudh Venkataramanan 4605940b61afSAnirudh Venkataramanan /* record the sw_id available for later use */ 4606940b61afSAnirudh Venkataramanan pf->first_sw->sw_id = hw->port_info->sw_id; 4607940b61afSAnirudh Venkataramanan 46083a858ba3SAnirudh Venkataramanan err = ice_setup_pf_sw(pf); 46093a858ba3SAnirudh Venkataramanan if (err) { 46102f2da36eSAnirudh Venkataramanan dev_err(dev, "probe failed due to setup PF switch: %d\n", err); 46113a858ba3SAnirudh Venkataramanan goto err_alloc_sw_unroll; 46123a858ba3SAnirudh Venkataramanan } 46139daf8208SAnirudh Venkataramanan 46147e408e07SAnirudh Venkataramanan clear_bit(ICE_SERVICE_DIS, pf->state); 46159daf8208SAnirudh Venkataramanan 4616e3710a01SPaul M Stillwell Jr /* tell the firmware we are up */ 4617e3710a01SPaul M Stillwell Jr err = ice_send_version(pf); 
4618e3710a01SPaul M Stillwell Jr if (err) { 461919cce2c6SAnirudh Venkataramanan dev_err(dev, "probe failed sending driver version %s. error: %d\n", 462034a2a3b8SJeff Kirsher UTS_RELEASE, err); 462178116e97SMarcin Szycik goto err_send_version_unroll; 4622e3710a01SPaul M Stillwell Jr } 4623e3710a01SPaul M Stillwell Jr 46249daf8208SAnirudh Venkataramanan /* since everything is good, start the service timer */ 46259daf8208SAnirudh Venkataramanan mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 46269daf8208SAnirudh Venkataramanan 4627250c3b3eSBrett Creeley err = ice_init_link_events(pf->hw.port_info); 4628250c3b3eSBrett Creeley if (err) { 4629250c3b3eSBrett Creeley dev_err(dev, "ice_init_link_events failed: %d\n", err); 463078116e97SMarcin Szycik goto err_send_version_unroll; 4631250c3b3eSBrett Creeley } 4632250c3b3eSBrett Creeley 463308771bceSAnirudh Venkataramanan /* not a fatal error if this fails */ 46341a3571b5SPaul Greenwalt err = ice_init_nvm_phy_type(pf->hw.port_info); 463508771bceSAnirudh Venkataramanan if (err) 46361a3571b5SPaul Greenwalt dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); 46371a3571b5SPaul Greenwalt 463808771bceSAnirudh Venkataramanan /* not a fatal error if this fails */ 46391a3571b5SPaul Greenwalt err = ice_update_link_info(pf->hw.port_info); 464008771bceSAnirudh Venkataramanan if (err) 46411a3571b5SPaul Greenwalt dev_err(dev, "ice_update_link_info failed: %d\n", err); 46421a3571b5SPaul Greenwalt 4643ea78ce4dSPaul Greenwalt ice_init_link_dflt_override(pf->hw.port_info); 4644ea78ce4dSPaul Greenwalt 464599d40752SBrett Creeley ice_check_link_cfg_err(pf, 464699d40752SBrett Creeley pf->hw.port_info->phy.link_info.link_cfg_err); 4647c77849f5SAnirudh Venkataramanan 46481a3571b5SPaul Greenwalt /* if media available, initialize PHY settings */ 46491a3571b5SPaul Greenwalt if (pf->hw.port_info->phy.link_info.link_info & 46501a3571b5SPaul Greenwalt ICE_AQ_MEDIA_AVAILABLE) { 465108771bceSAnirudh Venkataramanan /* not a fatal 
error if this fails */ 46521a3571b5SPaul Greenwalt err = ice_init_phy_user_cfg(pf->hw.port_info); 465308771bceSAnirudh Venkataramanan if (err) 46541a3571b5SPaul Greenwalt dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); 46551a3571b5SPaul Greenwalt 46561a3571b5SPaul Greenwalt if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { 46571a3571b5SPaul Greenwalt struct ice_vsi *vsi = ice_get_main_vsi(pf); 46581a3571b5SPaul Greenwalt 46591a3571b5SPaul Greenwalt if (vsi) 46601a3571b5SPaul Greenwalt ice_configure_phy(vsi); 46611a3571b5SPaul Greenwalt } 46621a3571b5SPaul Greenwalt } else { 46631a3571b5SPaul Greenwalt set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 46641a3571b5SPaul Greenwalt } 46651a3571b5SPaul Greenwalt 4666c585ea42SBrett Creeley ice_verify_cacheline_size(pf); 4667c585ea42SBrett Creeley 4668769c500dSAkeem G Abodunrin /* Save wakeup reason register for later use */ 4669769c500dSAkeem G Abodunrin pf->wakeup_reason = rd32(hw, PFPM_WUS); 4670769c500dSAkeem G Abodunrin 4671769c500dSAkeem G Abodunrin /* check for a power management event */ 4672769c500dSAkeem G Abodunrin ice_print_wake_reason(pf); 4673769c500dSAkeem G Abodunrin 4674769c500dSAkeem G Abodunrin /* clear wake status, all bits */ 4675769c500dSAkeem G Abodunrin wr32(hw, PFPM_WUS, U32_MAX); 4676769c500dSAkeem G Abodunrin 4677769c500dSAkeem G Abodunrin /* Disable WoL at init, wait for user to enable */ 4678769c500dSAkeem G Abodunrin device_set_wakeup_enable(dev, false); 4679769c500dSAkeem G Abodunrin 4680cd1f56f4SBrett Creeley if (ice_is_safe_mode(pf)) { 4681cd1f56f4SBrett Creeley ice_set_safe_mode_vlan_cfg(pf); 4682de75135bSAnirudh Venkataramanan goto probe_done; 4683cd1f56f4SBrett Creeley } 4684462acf6aSTony Nguyen 4685462acf6aSTony Nguyen /* initialize DDP driven features */ 468606c16d89SJacob Keller if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 468706c16d89SJacob Keller ice_ptp_init(pf); 4688462acf6aSTony Nguyen 4689148beb61SHenry Tieman /* Note: Flow director init failure is non-fatal to 
load */ 4690148beb61SHenry Tieman if (ice_init_fdir(pf)) 4691148beb61SHenry Tieman dev_err(dev, "could not initialize flow director\n"); 4692148beb61SHenry Tieman 4693462acf6aSTony Nguyen /* Note: DCB init failure is non-fatal to load */ 4694462acf6aSTony Nguyen if (ice_init_pf_dcb(pf, false)) { 4695462acf6aSTony Nguyen clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 4696462acf6aSTony Nguyen clear_bit(ICE_FLAG_DCB_ENA, pf->flags); 4697462acf6aSTony Nguyen } else { 4698462acf6aSTony Nguyen ice_cfg_lldp_mib_change(&pf->hw, true); 4699462acf6aSTony Nguyen } 4700462acf6aSTony Nguyen 4701df006dd4SDave Ertman if (ice_init_lag(pf)) 4702df006dd4SDave Ertman dev_warn(dev, "Failed to init link aggregation support\n"); 4703df006dd4SDave Ertman 4704e18ff118SPaul Greenwalt /* print PCI link speed and width */ 4705e18ff118SPaul Greenwalt pcie_print_link_status(pf->pdev); 4706e18ff118SPaul Greenwalt 4707de75135bSAnirudh Venkataramanan probe_done: 47081e23f076SAnirudh Venkataramanan err = ice_register_netdev(pf); 47091e23f076SAnirudh Venkataramanan if (err) 47101e23f076SAnirudh Venkataramanan goto err_netdev_reg; 47111e23f076SAnirudh Venkataramanan 4712e523af4eSShiraz Saleem err = ice_devlink_register_params(pf); 4713e523af4eSShiraz Saleem if (err) 4714e523af4eSShiraz Saleem goto err_netdev_reg; 4715e523af4eSShiraz Saleem 4716de75135bSAnirudh Venkataramanan /* ready to go, so clear down state bit */ 47177e408e07SAnirudh Venkataramanan clear_bit(ICE_DOWN, pf->state); 4718d25a0fc4SDave Ertman if (ice_is_aux_ena(pf)) { 4719d25a0fc4SDave Ertman pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL); 4720d25a0fc4SDave Ertman if (pf->aux_idx < 0) { 4721d25a0fc4SDave Ertman dev_err(dev, "Failed to allocate device ID for AUX driver\n"); 4722d25a0fc4SDave Ertman err = -ENOMEM; 4723e523af4eSShiraz Saleem goto err_devlink_reg_param; 4724d25a0fc4SDave Ertman } 4725d25a0fc4SDave Ertman 4726d25a0fc4SDave Ertman err = ice_init_rdma(pf); 4727d25a0fc4SDave Ertman if (err) { 4728d25a0fc4SDave Ertman 
dev_err(dev, "Failed to initialize RDMA: %d\n", err); 4729d25a0fc4SDave Ertman err = -EIO; 4730d25a0fc4SDave Ertman goto err_init_aux_unroll; 4731d25a0fc4SDave Ertman } 4732d25a0fc4SDave Ertman } else { 4733d25a0fc4SDave Ertman dev_warn(dev, "RDMA is not supported on this device\n"); 4734d25a0fc4SDave Ertman } 4735d25a0fc4SDave Ertman 4736838cefd5SLeon Romanovsky ice_devlink_register(pf); 4737837f08fdSAnirudh Venkataramanan return 0; 4738f31e4b6fSAnirudh Venkataramanan 4739d25a0fc4SDave Ertman err_init_aux_unroll: 4740d25a0fc4SDave Ertman pf->adev = NULL; 4741d25a0fc4SDave Ertman ida_free(&ice_aux_ida, pf->aux_idx); 4742e523af4eSShiraz Saleem err_devlink_reg_param: 4743e523af4eSShiraz Saleem ice_devlink_unregister_params(pf); 47441e23f076SAnirudh Venkataramanan err_netdev_reg: 474578116e97SMarcin Szycik err_send_version_unroll: 474678116e97SMarcin Szycik ice_vsi_release_all(pf); 47473a858ba3SAnirudh Venkataramanan err_alloc_sw_unroll: 47487e408e07SAnirudh Venkataramanan set_bit(ICE_SERVICE_DIS, pf->state); 47497e408e07SAnirudh Venkataramanan set_bit(ICE_DOWN, pf->state); 47504015d11eSBrett Creeley devm_kfree(dev, pf->first_sw); 4751940b61afSAnirudh Venkataramanan err_msix_misc_unroll: 4752940b61afSAnirudh Venkataramanan ice_free_irq_msix_misc(pf); 4753940b61afSAnirudh Venkataramanan err_init_interrupt_unroll: 4754940b61afSAnirudh Venkataramanan ice_clear_interrupt_scheme(pf); 4755bc3a0241SJacob Keller err_init_vsi_unroll: 475677ed84f4SBruce Allan devm_kfree(dev, pf->vsi); 4757940b61afSAnirudh Venkataramanan err_init_pf_unroll: 4758940b61afSAnirudh Venkataramanan ice_deinit_pf(pf); 4759dce730f1SJacob Keller ice_devlink_destroy_regions(pf); 4760940b61afSAnirudh Venkataramanan ice_deinit_hw(hw); 4761f31e4b6fSAnirudh Venkataramanan err_exit_unroll: 4762f31e4b6fSAnirudh Venkataramanan pci_disable_pcie_error_reporting(pdev); 4763769c500dSAkeem G Abodunrin pci_disable_device(pdev); 4764f31e4b6fSAnirudh Venkataramanan return err; 4765837f08fdSAnirudh Venkataramanan } 
4766837f08fdSAnirudh Venkataramanan 4767837f08fdSAnirudh Venkataramanan /** 4768769c500dSAkeem G Abodunrin * ice_set_wake - enable or disable Wake on LAN 4769769c500dSAkeem G Abodunrin * @pf: pointer to the PF struct 4770769c500dSAkeem G Abodunrin * 4771769c500dSAkeem G Abodunrin * Simple helper for WoL control 4772769c500dSAkeem G Abodunrin */ 4773769c500dSAkeem G Abodunrin static void ice_set_wake(struct ice_pf *pf) 4774769c500dSAkeem G Abodunrin { 4775769c500dSAkeem G Abodunrin struct ice_hw *hw = &pf->hw; 4776769c500dSAkeem G Abodunrin bool wol = pf->wol_ena; 4777769c500dSAkeem G Abodunrin 4778769c500dSAkeem G Abodunrin /* clear wake state, otherwise new wake events won't fire */ 4779769c500dSAkeem G Abodunrin wr32(hw, PFPM_WUS, U32_MAX); 4780769c500dSAkeem G Abodunrin 4781769c500dSAkeem G Abodunrin /* enable / disable APM wake up, no RMW needed */ 4782769c500dSAkeem G Abodunrin wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); 4783769c500dSAkeem G Abodunrin 4784769c500dSAkeem G Abodunrin /* set magic packet filter enabled */ 4785769c500dSAkeem G Abodunrin wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); 4786769c500dSAkeem G Abodunrin } 4787769c500dSAkeem G Abodunrin 4788769c500dSAkeem G Abodunrin /** 4789ef860480STony Nguyen * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet 4790769c500dSAkeem G Abodunrin * @pf: pointer to the PF struct 4791769c500dSAkeem G Abodunrin * 4792769c500dSAkeem G Abodunrin * Issue firmware command to enable multicast magic wake, making 4793769c500dSAkeem G Abodunrin * sure that any locally administered address (LAA) is used for 4794769c500dSAkeem G Abodunrin * wake, and that PF reset doesn't undo the LAA. 
4795769c500dSAkeem G Abodunrin */ 4796769c500dSAkeem G Abodunrin static void ice_setup_mc_magic_wake(struct ice_pf *pf) 4797769c500dSAkeem G Abodunrin { 4798769c500dSAkeem G Abodunrin struct device *dev = ice_pf_to_dev(pf); 4799769c500dSAkeem G Abodunrin struct ice_hw *hw = &pf->hw; 4800769c500dSAkeem G Abodunrin u8 mac_addr[ETH_ALEN]; 4801769c500dSAkeem G Abodunrin struct ice_vsi *vsi; 48025518ac2aSTony Nguyen int status; 4803769c500dSAkeem G Abodunrin u8 flags; 4804769c500dSAkeem G Abodunrin 4805769c500dSAkeem G Abodunrin if (!pf->wol_ena) 4806769c500dSAkeem G Abodunrin return; 4807769c500dSAkeem G Abodunrin 4808769c500dSAkeem G Abodunrin vsi = ice_get_main_vsi(pf); 4809769c500dSAkeem G Abodunrin if (!vsi) 4810769c500dSAkeem G Abodunrin return; 4811769c500dSAkeem G Abodunrin 4812769c500dSAkeem G Abodunrin /* Get current MAC address in case it's an LAA */ 4813769c500dSAkeem G Abodunrin if (vsi->netdev) 4814769c500dSAkeem G Abodunrin ether_addr_copy(mac_addr, vsi->netdev->dev_addr); 4815769c500dSAkeem G Abodunrin else 4816769c500dSAkeem G Abodunrin ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 4817769c500dSAkeem G Abodunrin 4818769c500dSAkeem G Abodunrin flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | 4819769c500dSAkeem G Abodunrin ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | 4820769c500dSAkeem G Abodunrin ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; 4821769c500dSAkeem G Abodunrin 4822769c500dSAkeem G Abodunrin status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); 4823769c500dSAkeem G Abodunrin if (status) 48245f87ec48STony Nguyen dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n", 48255518ac2aSTony Nguyen status, ice_aq_str(hw->adminq.sq_last_status)); 4826769c500dSAkeem G Abodunrin } 4827769c500dSAkeem G Abodunrin 4828769c500dSAkeem G Abodunrin /** 4829837f08fdSAnirudh Venkataramanan * ice_remove - Device removal routine 4830837f08fdSAnirudh Venkataramanan * @pdev: PCI device information struct 4831837f08fdSAnirudh Venkataramanan */ 
static void ice_remove(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	int i;

	ice_devlink_unregister(pf);
	/* give any in-progress reset up to ICE_MAX_RESET_WAIT * 100 ms to
	 * finish before starting teardown
	 */
	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
		if (!ice_is_reset_in_progress(pf->state))
			break;
		msleep(100);
	}

	ice_tc_indir_block_remove(pf);

	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
		/* block further VF resets while VF resources are reclaimed */
		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
		ice_free_vfs(pf);
	}

	ice_service_task_stop(pf);

	ice_aq_cancel_waiting_tasks(pf);
	ice_unplug_aux_dev(pf);
	if (pf->aux_idx >= 0)
		ida_free(&ice_aux_ida, pf->aux_idx);
	ice_devlink_unregister_params(pf);
	set_bit(ICE_DOWN, pf->state);

	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
	ice_deinit_lag(pf);
	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_release(pf);
	if (!ice_is_safe_mode(pf))
		ice_remove_arfs(pf);
	/* must run before ice_vsi_release_all() since it reads the main
	 * VSI's netdev MAC address
	 */
	ice_setup_mc_magic_wake(pf);
	ice_vsi_release_all(pf);
	ice_set_wake(pf);
	ice_free_irq_msix_misc(pf);
	ice_for_each_vsi(pf, i) {
		if (!pf->vsi[i])
			continue;
		ice_vsi_free_q_vectors(pf->vsi[i]);
	}
	ice_deinit_pf(pf);
	ice_devlink_destroy_regions(pf);
	ice_deinit_hw(&pf->hw);

	/* Issue a PFR as part of the prescribed driver unload flow. Do not
	 * do it via ice_schedule_reset() since there is no need to rebuild
	 * and the service task is already stopped.
	 */
	ice_reset(&pf->hw, ICE_RESET_PFR);
	pci_wait_for_pending_transaction(pdev);
	ice_clear_interrupt_scheme(pf);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}

/**
 * ice_shutdown - PCI callback for shutting down device
 * @pdev: PCI device information struct
 */
static void ice_shutdown(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	/* reuse the full driver-unload teardown path */
	ice_remove(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		/* arm WoL (if user enabled it) and drop to D3hot */
		pci_wake_from_d3(pdev, pf->wol_ena);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM
/**
 * ice_prepare_for_shutdown - prep for PCI shutdown
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for PCI device shutdown
 */
static void ice_prepare_for_shutdown(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 v;

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");

	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	/* NOTE(review): vsi_num is zeroed for every VSI — presumably to
	 * invalidate the HW VSI mapping before the control queues go away;
	 * confirm against the rebuild path
	 */
	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			pf->vsi[v]->vsi_num = 0;

	ice_shutdown_all_ctrlq(hw);
}

/**
 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
 * @pf: board private structure to reinitialize
 *
 * This routine reinitialize interrupt scheme that was cleared during
 * power management suspend callback.
 *
 * This should be called during resume routine to re-allocate the q_vectors
 * and reacquire interrupts.
 */
static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int ret, v;

	/* Since we clear MSIX flag during suspend, we need to
	 * set it back during resume...
	 */

	ret = ice_init_interrupt_scheme(pf);
	if (ret) {
		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
		return ret;
	}

	/* Remap vectors and rings, after successful re-init interrupts */
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;

		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
		if (ret)
			goto err_reinit;
		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
	}

	ret = ice_req_irq_msix_misc(pf);
	if (ret) {
		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
			ret);
		goto err_reinit;
	}

	return 0;

err_reinit:
	/* unwind only the VSIs whose q_vectors were already allocated */
	while (v--)
		if (pf->vsi[v])
			ice_vsi_free_q_vectors(pf->vsi[v]);

	return ret;
}

/**
 * ice_suspend
 * @dev: generic device information structure
 *
 * Power Management callback to quiesce the device and prepare
 * for D3 transition.
 */
static int __maybe_unused ice_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ice_pf *pf;
	int disabled, v;

	pf = pci_get_drvdata(pdev);

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Device is not ready, no need to suspend it\n");
		return -EBUSY;
	}

	/* Stop watchdog tasks until resume completion.
	 * Even though it is most likely that the service task is
	 * disabled if the device is suspended or down, the service task's
	 * state is controlled by a different state bit, and we should
	 * store and honor whatever state that bit is in at this point.
	 */
	disabled = ice_service_task_stop(pf);

	ice_unplug_aux_dev(pf);

	/* Already suspended?, then there is nothing to do */
	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
		if (!disabled)
			ice_service_task_restart(pf);
		return 0;
	}

	if (test_bit(ICE_DOWN, pf->state) ||
	    ice_is_reset_in_progress(pf->state)) {
		dev_err(dev, "can't suspend device in reset or already down\n");
		if (!disabled)
			ice_service_task_restart(pf);
		return 0;
	}

	ice_setup_mc_magic_wake(pf);

	ice_prepare_for_shutdown(pf);

	ice_set_wake(pf);

	/* Free vectors, clear the interrupt scheme and release IRQs
	 * for proper hibernation, especially with large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	ice_free_irq_msix_misc(pf);
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;
		ice_vsi_free_q_vectors(pf->vsi[v]);
	}
	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
	ice_clear_interrupt_scheme(pf);

	pci_save_state(pdev);
	pci_wake_from_d3(pdev, pf->wol_ena);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

/**
 * ice_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 */
static int __maybe_unused ice_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	enum ice_reset_req reset_type;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;

	ret = pci_enable_device_mem(pdev);
	if (ret) {
		dev_err(dev, "Cannot enable device after suspend\n");
		return ret;
	}

	pf = pci_get_drvdata(pdev);
	hw = &pf->hw;

	pf->wakeup_reason = rd32(hw, PFPM_WUS);
	ice_print_wake_reason(pf);

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	ret = ice_reinit_interrupt_scheme(pf);
	if (ret)
		/* logged but not treated as fatal; continue to PF reset */
		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);

	clear_bit(ICE_DOWN, pf->state);
	/* Now perform PF reset and rebuild */
	reset_type = ICE_RESET_PFR;
	/* re-enable service task for reset, but allow reset to schedule it */
	clear_bit(ICE_SERVICE_DIS, pf->state);

	if (ice_schedule_reset(pf, reset_type))
		dev_err(dev, "Reset during resume failed.\n");

	clear_bit(ICE_SUSPENDED, pf->state);
	ice_service_task_restart(pf);

	/* Restart the service task */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));

	return 0;
}
#endif /* CONFIG_PM */

/**
 * ice_pci_err_detected - warning that PCI error has been detected
 * @pdev: PCI device information struct
 * @err: the type of PCI error
 *
 * Called to warn that something happened on the PCI bus and the error handling
 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
 */
static pci_ers_result_t
ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	if (!pf) {
		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
			__func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (!test_bit(ICE_SUSPENDED, pf->state)) {
		ice_service_task_stop(pf);

		/* request and prepare a PF reset unless one is already
		 * prepared
		 */
		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
			set_bit(ICE_PFR_REQ, pf->state);
			ice_prepare_for_reset(pf, ICE_RESET_PFR);
		}
	}

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ice_pci_err_slot_reset - a PCI slot reset has just happened
 * @pdev: PCI device information struct
 *
 * Called to determine if the driver can recover from the PCI slot reset by
 * using a register read to determine if the device is recoverable.
 */
static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
			err);
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		/* Check for life */
		reg = rd32(&pf->hw, GLGEN_RTRIG);
		if (!reg)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_aer_clear_nonfatal_status(pdev);
	if (err)
		dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
			err);
		/* non-fatal, continue */

	return result;
}

/**
 * ice_pci_err_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error and/or
 * reset recovery have finished
 */
static void ice_pci_err_resume(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	if (!pf) {
		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
			__func__);
		return;
	}

	if (test_bit(ICE_SUSPENDED, pf->state)) {
		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
			__func__);
		return;
	}

	ice_restore_all_vfs_msi_state(pdev);

	ice_do_reset(pf, ICE_RESET_PFR);
	ice_service_task_restart(pf);
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
}

/**
 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
 * @pdev: PCI device information struct
 */
static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	if (!test_bit(ICE_SUSPENDED, pf->state)) {
		ice_service_task_stop(pf);

		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
			set_bit(ICE_PFR_REQ, pf->state);
			ice_prepare_for_reset(pf, ICE_RESET_PFR);
		}
	}
}

/**
 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
 * @pdev: PCI device information struct
 */
static void ice_pci_err_reset_done(struct pci_dev *pdev)
{
	ice_pci_err_resume(pdev);
}

/* ice_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ice_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
	/* required last entry */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);

/* only referenced by ice_driver under CONFIG_PM, hence __maybe_unused */
static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);

static const struct pci_error_handlers ice_pci_err_handler = {
	.error_detected = ice_pci_err_detected,
	.slot_reset = ice_pci_err_slot_reset,
	.reset_prepare = ice_pci_err_reset_prepare,
	.reset_done = ice_pci_err_reset_done,
	.resume = ice_pci_err_resume
};

static struct pci_driver ice_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ice_pci_tbl,
	.probe = ice_probe,
	.remove = ice_remove,
#ifdef CONFIG_PM
	.driver.pm = &ice_pm_ops,
#endif /* CONFIG_PM */
	.shutdown = ice_shutdown,
	.sriov_configure = ice_sriov_configure,
	.err_handler = &ice_pci_err_handler
};

/**
 * ice_module_init - Driver registration routine
 *
 * ice_module_init is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
5312837f08fdSAnirudh Venkataramanan */ 5313837f08fdSAnirudh Venkataramanan static int __init ice_module_init(void) 5314837f08fdSAnirudh Venkataramanan { 5315837f08fdSAnirudh Venkataramanan int status; 5316837f08fdSAnirudh Venkataramanan 531734a2a3b8SJeff Kirsher pr_info("%s\n", ice_driver_string); 5318837f08fdSAnirudh Venkataramanan pr_info("%s\n", ice_copyright); 5319837f08fdSAnirudh Venkataramanan 53200f9d5027SAnirudh Venkataramanan ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); 5321940b61afSAnirudh Venkataramanan if (!ice_wq) { 5322940b61afSAnirudh Venkataramanan pr_err("Failed to create workqueue\n"); 5323940b61afSAnirudh Venkataramanan return -ENOMEM; 5324940b61afSAnirudh Venkataramanan } 5325940b61afSAnirudh Venkataramanan 5326837f08fdSAnirudh Venkataramanan status = pci_register_driver(&ice_driver); 5327940b61afSAnirudh Venkataramanan if (status) { 53282f2da36eSAnirudh Venkataramanan pr_err("failed to register PCI driver, err %d\n", status); 5329940b61afSAnirudh Venkataramanan destroy_workqueue(ice_wq); 5330940b61afSAnirudh Venkataramanan } 5331837f08fdSAnirudh Venkataramanan 5332837f08fdSAnirudh Venkataramanan return status; 5333837f08fdSAnirudh Venkataramanan } 5334837f08fdSAnirudh Venkataramanan module_init(ice_module_init); 5335837f08fdSAnirudh Venkataramanan 5336837f08fdSAnirudh Venkataramanan /** 5337837f08fdSAnirudh Venkataramanan * ice_module_exit - Driver exit cleanup routine 5338837f08fdSAnirudh Venkataramanan * 5339837f08fdSAnirudh Venkataramanan * ice_module_exit is called just before the driver is removed 5340837f08fdSAnirudh Venkataramanan * from memory. 
5341837f08fdSAnirudh Venkataramanan */ 5342837f08fdSAnirudh Venkataramanan static void __exit ice_module_exit(void) 5343837f08fdSAnirudh Venkataramanan { 5344837f08fdSAnirudh Venkataramanan pci_unregister_driver(&ice_driver); 5345940b61afSAnirudh Venkataramanan destroy_workqueue(ice_wq); 5346837f08fdSAnirudh Venkataramanan pr_info("module unloaded\n"); 5347837f08fdSAnirudh Venkataramanan } 5348837f08fdSAnirudh Venkataramanan module_exit(ice_module_exit); 53493a858ba3SAnirudh Venkataramanan 53503a858ba3SAnirudh Venkataramanan /** 5351f9867df6SAnirudh Venkataramanan * ice_set_mac_address - NDO callback to set MAC address 5352e94d4478SAnirudh Venkataramanan * @netdev: network interface device structure 5353e94d4478SAnirudh Venkataramanan * @pi: pointer to an address structure 5354e94d4478SAnirudh Venkataramanan * 5355e94d4478SAnirudh Venkataramanan * Returns 0 on success, negative on failure 5356e94d4478SAnirudh Venkataramanan */ 5357e94d4478SAnirudh Venkataramanan static int ice_set_mac_address(struct net_device *netdev, void *pi) 5358e94d4478SAnirudh Venkataramanan { 5359e94d4478SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 5360e94d4478SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 5361e94d4478SAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 5362e94d4478SAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 5363e94d4478SAnirudh Venkataramanan struct sockaddr *addr = pi; 5364b357d971SBrett Creeley u8 old_mac[ETH_ALEN]; 5365e94d4478SAnirudh Venkataramanan u8 flags = 0; 5366e94d4478SAnirudh Venkataramanan u8 *mac; 53672ccc1c1cSTony Nguyen int err; 5368e94d4478SAnirudh Venkataramanan 5369e94d4478SAnirudh Venkataramanan mac = (u8 *)addr->sa_data; 5370e94d4478SAnirudh Venkataramanan 5371e94d4478SAnirudh Venkataramanan if (!is_valid_ether_addr(mac)) 5372e94d4478SAnirudh Venkataramanan return -EADDRNOTAVAIL; 5373e94d4478SAnirudh Venkataramanan 5374e94d4478SAnirudh Venkataramanan if (ether_addr_equal(netdev->dev_addr, mac)) { 
53753ba7f53fSBrett Creeley netdev_dbg(netdev, "already using mac %pM\n", mac); 5376e94d4478SAnirudh Venkataramanan return 0; 5377e94d4478SAnirudh Venkataramanan } 5378e94d4478SAnirudh Venkataramanan 53797e408e07SAnirudh Venkataramanan if (test_bit(ICE_DOWN, pf->state) || 53805df7e45dSDave Ertman ice_is_reset_in_progress(pf->state)) { 5381e94d4478SAnirudh Venkataramanan netdev_err(netdev, "can't set mac %pM. device not ready\n", 5382e94d4478SAnirudh Venkataramanan mac); 5383e94d4478SAnirudh Venkataramanan return -EBUSY; 5384e94d4478SAnirudh Venkataramanan } 5385e94d4478SAnirudh Venkataramanan 53869fea7498SKiran Patil if (ice_chnl_dmac_fltr_cnt(pf)) { 53879fea7498SKiran Patil netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n", 53889fea7498SKiran Patil mac); 53899fea7498SKiran Patil return -EAGAIN; 53909fea7498SKiran Patil } 53919fea7498SKiran Patil 53923ba7f53fSBrett Creeley netif_addr_lock_bh(netdev); 5393b357d971SBrett Creeley ether_addr_copy(old_mac, netdev->dev_addr); 5394b357d971SBrett Creeley /* change the netdev's MAC address */ 5395a05e4c0aSJakub Kicinski eth_hw_addr_set(netdev, mac); 5396b357d971SBrett Creeley netif_addr_unlock_bh(netdev); 5397b357d971SBrett Creeley 5398757976abSLihong Yang /* Clean up old MAC filter. Not an error if old filter doesn't exist */ 53992ccc1c1cSTony Nguyen err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); 54002ccc1c1cSTony Nguyen if (err && err != -ENOENT) { 5401e94d4478SAnirudh Venkataramanan err = -EADDRNOTAVAIL; 5402bbb968e8SAkeem G Abodunrin goto err_update_filters; 5403e94d4478SAnirudh Venkataramanan } 5404e94d4478SAnirudh Venkataramanan 540513ed5e8aSNick Nunley /* Add filter for new MAC. 
If filter exists, return success */ 54062ccc1c1cSTony Nguyen err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); 54072ccc1c1cSTony Nguyen if (err == -EEXIST) 540813ed5e8aSNick Nunley /* Although this MAC filter is already present in hardware it's 540913ed5e8aSNick Nunley * possible in some cases (e.g. bonding) that dev_addr was 541013ed5e8aSNick Nunley * modified outside of the driver and needs to be restored back 541113ed5e8aSNick Nunley * to this value. 541213ed5e8aSNick Nunley */ 5413757976abSLihong Yang netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); 54142ccc1c1cSTony Nguyen else if (err) 5415757976abSLihong Yang /* error if the new filter addition failed */ 5416757976abSLihong Yang err = -EADDRNOTAVAIL; 5417757976abSLihong Yang 5418bbb968e8SAkeem G Abodunrin err_update_filters: 5419e94d4478SAnirudh Venkataramanan if (err) { 54202f2da36eSAnirudh Venkataramanan netdev_err(netdev, "can't set MAC %pM. filter update failed\n", 5421e94d4478SAnirudh Venkataramanan mac); 5422b357d971SBrett Creeley netif_addr_lock_bh(netdev); 5423f3956ebbSJakub Kicinski eth_hw_addr_set(netdev, old_mac); 54243ba7f53fSBrett Creeley netif_addr_unlock_bh(netdev); 5425e94d4478SAnirudh Venkataramanan return err; 5426e94d4478SAnirudh Venkataramanan } 5427e94d4478SAnirudh Venkataramanan 54282f2da36eSAnirudh Venkataramanan netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", 5429e94d4478SAnirudh Venkataramanan netdev->dev_addr); 5430e94d4478SAnirudh Venkataramanan 5431f9867df6SAnirudh Venkataramanan /* write new MAC address to the firmware */ 5432e94d4478SAnirudh Venkataramanan flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 54332ccc1c1cSTony Nguyen err = ice_aq_manage_mac_write(hw, mac, flags, NULL); 54342ccc1c1cSTony Nguyen if (err) { 54355f87ec48STony Nguyen netdev_err(netdev, "can't set MAC %pM. 
write to firmware failed error %d\n", 54362ccc1c1cSTony Nguyen mac, err); 5437e94d4478SAnirudh Venkataramanan } 5438e94d4478SAnirudh Venkataramanan return 0; 5439e94d4478SAnirudh Venkataramanan } 5440e94d4478SAnirudh Venkataramanan 5441e94d4478SAnirudh Venkataramanan /** 5442e94d4478SAnirudh Venkataramanan * ice_set_rx_mode - NDO callback to set the netdev filters 5443e94d4478SAnirudh Venkataramanan * @netdev: network interface device structure 5444e94d4478SAnirudh Venkataramanan */ 5445e94d4478SAnirudh Venkataramanan static void ice_set_rx_mode(struct net_device *netdev) 5446e94d4478SAnirudh Venkataramanan { 5447e94d4478SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 5448e94d4478SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 5449e94d4478SAnirudh Venkataramanan 5450e94d4478SAnirudh Venkataramanan if (!vsi) 5451e94d4478SAnirudh Venkataramanan return; 5452e94d4478SAnirudh Venkataramanan 5453e94d4478SAnirudh Venkataramanan /* Set the flags to synchronize filters 5454e94d4478SAnirudh Venkataramanan * ndo_set_rx_mode may be triggered even without a change in netdev 5455e94d4478SAnirudh Venkataramanan * flags 5456e94d4478SAnirudh Venkataramanan */ 5457e97fb1aeSAnirudh Venkataramanan set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); 5458e97fb1aeSAnirudh Venkataramanan set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); 5459e94d4478SAnirudh Venkataramanan set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); 5460e94d4478SAnirudh Venkataramanan 5461e94d4478SAnirudh Venkataramanan /* schedule our worker thread which will take care of 5462e94d4478SAnirudh Venkataramanan * applying the new filter changes 5463e94d4478SAnirudh Venkataramanan */ 5464e94d4478SAnirudh Venkataramanan ice_service_task_schedule(vsi->back); 5465e94d4478SAnirudh Venkataramanan } 5466e94d4478SAnirudh Venkataramanan 5467e94d4478SAnirudh Venkataramanan /** 54681ddef455SUsha Ketineni * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate 54691ddef455SUsha Ketineni * 
@netdev: network interface device structure 54701ddef455SUsha Ketineni * @queue_index: Queue ID 54711ddef455SUsha Ketineni * @maxrate: maximum bandwidth in Mbps 54721ddef455SUsha Ketineni */ 54731ddef455SUsha Ketineni static int 54741ddef455SUsha Ketineni ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) 54751ddef455SUsha Ketineni { 54761ddef455SUsha Ketineni struct ice_netdev_priv *np = netdev_priv(netdev); 54771ddef455SUsha Ketineni struct ice_vsi *vsi = np->vsi; 54781ddef455SUsha Ketineni u16 q_handle; 54795518ac2aSTony Nguyen int status; 54801ddef455SUsha Ketineni u8 tc; 54811ddef455SUsha Ketineni 54821ddef455SUsha Ketineni /* Validate maxrate requested is within permitted range */ 54831ddef455SUsha Ketineni if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { 548419cce2c6SAnirudh Venkataramanan netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n", 54851ddef455SUsha Ketineni maxrate, queue_index); 54861ddef455SUsha Ketineni return -EINVAL; 54871ddef455SUsha Ketineni } 54881ddef455SUsha Ketineni 54891ddef455SUsha Ketineni q_handle = vsi->tx_rings[queue_index]->q_handle; 54901ddef455SUsha Ketineni tc = ice_dcb_get_tc(vsi, queue_index); 54911ddef455SUsha Ketineni 54921ddef455SUsha Ketineni /* Set BW back to default, when user set maxrate to 0 */ 54931ddef455SUsha Ketineni if (!maxrate) 54941ddef455SUsha Ketineni status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, 54951ddef455SUsha Ketineni q_handle, ICE_MAX_BW); 54961ddef455SUsha Ketineni else 54971ddef455SUsha Ketineni status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, 54981ddef455SUsha Ketineni q_handle, ICE_MAX_BW, maxrate * 1000); 5499c1484691STony Nguyen if (status) 55005f87ec48STony Nguyen netdev_err(netdev, "Unable to set Tx max rate, error %d\n", 55015f87ec48STony Nguyen status); 55021ddef455SUsha Ketineni 5503c1484691STony Nguyen return status; 55041ddef455SUsha Ketineni } 55051ddef455SUsha Ketineni 55061ddef455SUsha Ketineni /** 
5507e94d4478SAnirudh Venkataramanan * ice_fdb_add - add an entry to the hardware database 5508e94d4478SAnirudh Venkataramanan * @ndm: the input from the stack 5509e94d4478SAnirudh Venkataramanan * @tb: pointer to array of nladdr (unused) 5510e94d4478SAnirudh Venkataramanan * @dev: the net device pointer 5511e94d4478SAnirudh Venkataramanan * @addr: the MAC address entry being added 5512f9867df6SAnirudh Venkataramanan * @vid: VLAN ID 5513e94d4478SAnirudh Venkataramanan * @flags: instructions from stack about fdb operation 551499be37edSBruce Allan * @extack: netlink extended ack 5515e94d4478SAnirudh Venkataramanan */ 551699be37edSBruce Allan static int 551799be37edSBruce Allan ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], 551899be37edSBruce Allan struct net_device *dev, const unsigned char *addr, u16 vid, 551999be37edSBruce Allan u16 flags, struct netlink_ext_ack __always_unused *extack) 5520e94d4478SAnirudh Venkataramanan { 5521e94d4478SAnirudh Venkataramanan int err; 5522e94d4478SAnirudh Venkataramanan 5523e94d4478SAnirudh Venkataramanan if (vid) { 5524e94d4478SAnirudh Venkataramanan netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); 5525e94d4478SAnirudh Venkataramanan return -EINVAL; 5526e94d4478SAnirudh Venkataramanan } 5527e94d4478SAnirudh Venkataramanan if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 5528e94d4478SAnirudh Venkataramanan netdev_err(dev, "FDB only supports static addresses\n"); 5529e94d4478SAnirudh Venkataramanan return -EINVAL; 5530e94d4478SAnirudh Venkataramanan } 5531e94d4478SAnirudh Venkataramanan 5532e94d4478SAnirudh Venkataramanan if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 5533e94d4478SAnirudh Venkataramanan err = dev_uc_add_excl(dev, addr); 5534e94d4478SAnirudh Venkataramanan else if (is_multicast_ether_addr(addr)) 5535e94d4478SAnirudh Venkataramanan err = dev_mc_add_excl(dev, addr); 5536e94d4478SAnirudh Venkataramanan else 5537e94d4478SAnirudh Venkataramanan err = 
-EINVAL; 5538e94d4478SAnirudh Venkataramanan 5539e94d4478SAnirudh Venkataramanan /* Only return duplicate errors if NLM_F_EXCL is set */ 5540e94d4478SAnirudh Venkataramanan if (err == -EEXIST && !(flags & NLM_F_EXCL)) 5541e94d4478SAnirudh Venkataramanan err = 0; 5542e94d4478SAnirudh Venkataramanan 5543e94d4478SAnirudh Venkataramanan return err; 5544e94d4478SAnirudh Venkataramanan } 5545e94d4478SAnirudh Venkataramanan 5546e94d4478SAnirudh Venkataramanan /** 5547e94d4478SAnirudh Venkataramanan * ice_fdb_del - delete an entry from the hardware database 5548e94d4478SAnirudh Venkataramanan * @ndm: the input from the stack 5549e94d4478SAnirudh Venkataramanan * @tb: pointer to array of nladdr (unused) 5550e94d4478SAnirudh Venkataramanan * @dev: the net device pointer 5551e94d4478SAnirudh Venkataramanan * @addr: the MAC address entry being added 5552f9867df6SAnirudh Venkataramanan * @vid: VLAN ID 5553e94d4478SAnirudh Venkataramanan */ 5554c8b7abddSBruce Allan static int 5555c8b7abddSBruce Allan ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], 5556e94d4478SAnirudh Venkataramanan struct net_device *dev, const unsigned char *addr, 5557e94d4478SAnirudh Venkataramanan __always_unused u16 vid) 5558e94d4478SAnirudh Venkataramanan { 5559e94d4478SAnirudh Venkataramanan int err; 5560e94d4478SAnirudh Venkataramanan 5561e94d4478SAnirudh Venkataramanan if (ndm->ndm_state & NUD_PERMANENT) { 5562e94d4478SAnirudh Venkataramanan netdev_err(dev, "FDB only supports static addresses\n"); 5563e94d4478SAnirudh Venkataramanan return -EINVAL; 5564e94d4478SAnirudh Venkataramanan } 5565e94d4478SAnirudh Venkataramanan 5566e94d4478SAnirudh Venkataramanan if (is_unicast_ether_addr(addr)) 5567e94d4478SAnirudh Venkataramanan err = dev_uc_del(dev, addr); 5568e94d4478SAnirudh Venkataramanan else if (is_multicast_ether_addr(addr)) 5569e94d4478SAnirudh Venkataramanan err = dev_mc_del(dev, addr); 5570e94d4478SAnirudh Venkataramanan else 5571e94d4478SAnirudh Venkataramanan err = -EINVAL; 
5572e94d4478SAnirudh Venkataramanan 5573e94d4478SAnirudh Venkataramanan return err; 5574e94d4478SAnirudh Venkataramanan } 5575e94d4478SAnirudh Venkataramanan 5576e94d4478SAnirudh Venkataramanan /** 5577d76a60baSAnirudh Venkataramanan * ice_set_features - set the netdev feature flags 5578d76a60baSAnirudh Venkataramanan * @netdev: ptr to the netdev being adjusted 5579d76a60baSAnirudh Venkataramanan * @features: the feature set that the stack is suggesting 5580d76a60baSAnirudh Venkataramanan */ 5581c8b7abddSBruce Allan static int 5582c8b7abddSBruce Allan ice_set_features(struct net_device *netdev, netdev_features_t features) 5583d76a60baSAnirudh Venkataramanan { 5584d76a60baSAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 5585d76a60baSAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 55865f8cc355SHenry Tieman struct ice_pf *pf = vsi->back; 5587d76a60baSAnirudh Venkataramanan int ret = 0; 5588d76a60baSAnirudh Venkataramanan 5589462acf6aSTony Nguyen /* Don't set any netdev advanced features with device in Safe Mode */ 5590462acf6aSTony Nguyen if (ice_is_safe_mode(vsi->back)) { 559119cce2c6SAnirudh Venkataramanan dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n"); 5592462acf6aSTony Nguyen return ret; 5593462acf6aSTony Nguyen } 5594462acf6aSTony Nguyen 55955f8cc355SHenry Tieman /* Do not change setting during reset */ 55965f8cc355SHenry Tieman if (ice_is_reset_in_progress(pf->state)) { 559719cce2c6SAnirudh Venkataramanan dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); 55985f8cc355SHenry Tieman return -EBUSY; 55995f8cc355SHenry Tieman } 56005f8cc355SHenry Tieman 56018f529ff9STony Nguyen /* Multiple features can be changed in one call so keep features in 56028f529ff9STony Nguyen * separate if/else statements to guarantee each feature is checked 56038f529ff9STony Nguyen */ 5604492af0abSMd Fahad Iqbal Polash if (features & 
NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) 56054fe36226SPaul M Stillwell Jr ice_vsi_manage_rss_lut(vsi, true); 5606492af0abSMd Fahad Iqbal Polash else if (!(features & NETIF_F_RXHASH) && 5607492af0abSMd Fahad Iqbal Polash netdev->features & NETIF_F_RXHASH) 56084fe36226SPaul M Stillwell Jr ice_vsi_manage_rss_lut(vsi, false); 5609492af0abSMd Fahad Iqbal Polash 5610d76a60baSAnirudh Venkataramanan if ((features & NETIF_F_HW_VLAN_CTAG_RX) && 5611d76a60baSAnirudh Venkataramanan !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 5612d76a60baSAnirudh Venkataramanan ret = ice_vsi_manage_vlan_stripping(vsi, true); 5613d76a60baSAnirudh Venkataramanan else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && 5614d76a60baSAnirudh Venkataramanan (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 5615d76a60baSAnirudh Venkataramanan ret = ice_vsi_manage_vlan_stripping(vsi, false); 56168f529ff9STony Nguyen 56178f529ff9STony Nguyen if ((features & NETIF_F_HW_VLAN_CTAG_TX) && 5618d76a60baSAnirudh Venkataramanan !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 5619d76a60baSAnirudh Venkataramanan ret = ice_vsi_manage_vlan_insertion(vsi); 5620d76a60baSAnirudh Venkataramanan else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) && 5621d76a60baSAnirudh Venkataramanan (netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 5622d76a60baSAnirudh Venkataramanan ret = ice_vsi_manage_vlan_insertion(vsi); 5623d76a60baSAnirudh Venkataramanan 56243171948eSTony Nguyen if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && 56253171948eSTony Nguyen !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 562629e71f41SBrett Creeley ret = ice_cfg_vlan_pruning(vsi, true); 56273171948eSTony Nguyen else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && 56283171948eSTony Nguyen (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 562929e71f41SBrett Creeley ret = ice_cfg_vlan_pruning(vsi, false); 56303171948eSTony Nguyen 5631148beb61SHenry Tieman if ((features & NETIF_F_NTUPLE) && 563228bf2672SBrett Creeley !(netdev->features & NETIF_F_NTUPLE)) { 
5633148beb61SHenry Tieman ice_vsi_manage_fdir(vsi, true); 563428bf2672SBrett Creeley ice_init_arfs(vsi); 563528bf2672SBrett Creeley } else if (!(features & NETIF_F_NTUPLE) && 563628bf2672SBrett Creeley (netdev->features & NETIF_F_NTUPLE)) { 5637148beb61SHenry Tieman ice_vsi_manage_fdir(vsi, false); 563828bf2672SBrett Creeley ice_clear_arfs(vsi); 563928bf2672SBrett Creeley } 5640148beb61SHenry Tieman 5641fbc7b27aSKiran Patil /* don't turn off hw_tc_offload when ADQ is already enabled */ 5642fbc7b27aSKiran Patil if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) { 5643fbc7b27aSKiran Patil dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n"); 5644fbc7b27aSKiran Patil return -EACCES; 5645fbc7b27aSKiran Patil } 56469fea7498SKiran Patil 56479fea7498SKiran Patil if ((features & NETIF_F_HW_TC) && 56489fea7498SKiran Patil !(netdev->features & NETIF_F_HW_TC)) 56499fea7498SKiran Patil set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); 56509fea7498SKiran Patil else 56519fea7498SKiran Patil clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags); 56529fea7498SKiran Patil 5653d76a60baSAnirudh Venkataramanan return ret; 5654d76a60baSAnirudh Venkataramanan } 5655d76a60baSAnirudh Venkataramanan 5656d76a60baSAnirudh Venkataramanan /** 5657f9867df6SAnirudh Venkataramanan * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI 5658f9867df6SAnirudh Venkataramanan * @vsi: VSI to setup VLAN properties for 5659d76a60baSAnirudh Venkataramanan */ 5660d76a60baSAnirudh Venkataramanan static int ice_vsi_vlan_setup(struct ice_vsi *vsi) 5661d76a60baSAnirudh Venkataramanan { 5662d76a60baSAnirudh Venkataramanan int ret = 0; 5663d76a60baSAnirudh Venkataramanan 5664d76a60baSAnirudh Venkataramanan if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 5665d76a60baSAnirudh Venkataramanan ret = ice_vsi_manage_vlan_stripping(vsi, true); 5666d76a60baSAnirudh Venkataramanan if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) 5667d76a60baSAnirudh Venkataramanan ret = 
ice_vsi_manage_vlan_insertion(vsi); 5668d76a60baSAnirudh Venkataramanan 5669d76a60baSAnirudh Venkataramanan return ret; 5670d76a60baSAnirudh Venkataramanan } 5671d76a60baSAnirudh Venkataramanan 5672d76a60baSAnirudh Venkataramanan /** 5673cdedef59SAnirudh Venkataramanan * ice_vsi_cfg - Setup the VSI 5674cdedef59SAnirudh Venkataramanan * @vsi: the VSI being configured 5675cdedef59SAnirudh Venkataramanan * 5676cdedef59SAnirudh Venkataramanan * Return 0 on success and negative value on error 5677cdedef59SAnirudh Venkataramanan */ 56780e674aebSAnirudh Venkataramanan int ice_vsi_cfg(struct ice_vsi *vsi) 5679cdedef59SAnirudh Venkataramanan { 5680cdedef59SAnirudh Venkataramanan int err; 5681cdedef59SAnirudh Venkataramanan 5682c7f2c42bSAnirudh Venkataramanan if (vsi->netdev) { 5683e94d4478SAnirudh Venkataramanan ice_set_rx_mode(vsi->netdev); 56849ecd25c2SAnirudh Venkataramanan 56859ecd25c2SAnirudh Venkataramanan err = ice_vsi_vlan_setup(vsi); 56869ecd25c2SAnirudh Venkataramanan 5687d76a60baSAnirudh Venkataramanan if (err) 5688d76a60baSAnirudh Venkataramanan return err; 5689c7f2c42bSAnirudh Venkataramanan } 5690a629cf0aSAnirudh Venkataramanan ice_vsi_cfg_dcb_rings(vsi); 569103f7a986SAnirudh Venkataramanan 569203f7a986SAnirudh Venkataramanan err = ice_vsi_cfg_lan_txqs(vsi); 5693efc2214bSMaciej Fijalkowski if (!err && ice_is_xdp_ena_vsi(vsi)) 5694efc2214bSMaciej Fijalkowski err = ice_vsi_cfg_xdp_txqs(vsi); 5695cdedef59SAnirudh Venkataramanan if (!err) 5696cdedef59SAnirudh Venkataramanan err = ice_vsi_cfg_rxqs(vsi); 5697cdedef59SAnirudh Venkataramanan 5698cdedef59SAnirudh Venkataramanan return err; 5699cdedef59SAnirudh Venkataramanan } 5700cdedef59SAnirudh Venkataramanan 5701cdf1f1f1SJacob Keller /* THEORY OF MODERATION: 5702d8eb7ad5SJesse Brandeburg * The ice driver hardware works differently than the hardware that DIMLIB was 5703cdf1f1f1SJacob Keller * originally made for. 
ice hardware doesn't have packet count limits that 5704cdf1f1f1SJacob Keller * can trigger an interrupt, but it *does* have interrupt rate limit support, 5705d8eb7ad5SJesse Brandeburg * which is hard-coded to a limit of 250,000 ints/second. 5706d8eb7ad5SJesse Brandeburg * If not using dynamic moderation, the INTRL value can be modified 5707d8eb7ad5SJesse Brandeburg * by ethtool rx-usecs-high. 5708cdf1f1f1SJacob Keller */ 5709cdf1f1f1SJacob Keller struct ice_dim { 5710cdf1f1f1SJacob Keller /* the throttle rate for interrupts, basically worst case delay before 5711cdf1f1f1SJacob Keller * an initial interrupt fires, value is stored in microseconds. 5712cdf1f1f1SJacob Keller */ 5713cdf1f1f1SJacob Keller u16 itr; 5714cdf1f1f1SJacob Keller }; 5715cdf1f1f1SJacob Keller 5716cdf1f1f1SJacob Keller /* Make a different profile for Rx that doesn't allow quite so aggressive 5717d8eb7ad5SJesse Brandeburg * moderation at the high end (it maxes out at 126us or about 8k interrupts a 5718d8eb7ad5SJesse Brandeburg * second. 
5719cdf1f1f1SJacob Keller */ 5720cdf1f1f1SJacob Keller static const struct ice_dim rx_profile[] = { 5721d8eb7ad5SJesse Brandeburg {2}, /* 500,000 ints/s, capped at 250K by INTRL */ 5722d8eb7ad5SJesse Brandeburg {8}, /* 125,000 ints/s */ 5723d8eb7ad5SJesse Brandeburg {16}, /* 62,500 ints/s */ 5724d8eb7ad5SJesse Brandeburg {62}, /* 16,129 ints/s */ 5725d8eb7ad5SJesse Brandeburg {126} /* 7,936 ints/s */ 5726cdf1f1f1SJacob Keller }; 5727cdf1f1f1SJacob Keller 5728cdf1f1f1SJacob Keller /* The transmit profile, which has the same sorts of values 5729cdf1f1f1SJacob Keller * as the previous struct 5730cdf1f1f1SJacob Keller */ 5731cdf1f1f1SJacob Keller static const struct ice_dim tx_profile[] = { 5732d8eb7ad5SJesse Brandeburg {2}, /* 500,000 ints/s, capped at 250K by INTRL */ 5733d8eb7ad5SJesse Brandeburg {8}, /* 125,000 ints/s */ 5734d8eb7ad5SJesse Brandeburg {40}, /* 16,125 ints/s */ 5735d8eb7ad5SJesse Brandeburg {128}, /* 7,812 ints/s */ 5736d8eb7ad5SJesse Brandeburg {256} /* 3,906 ints/s */ 5737cdf1f1f1SJacob Keller }; 5738cdf1f1f1SJacob Keller 5739cdf1f1f1SJacob Keller static void ice_tx_dim_work(struct work_struct *work) 5740cdf1f1f1SJacob Keller { 5741cdf1f1f1SJacob Keller struct ice_ring_container *rc; 5742cdf1f1f1SJacob Keller struct dim *dim; 5743d8eb7ad5SJesse Brandeburg u16 itr; 5744cdf1f1f1SJacob Keller 5745cdf1f1f1SJacob Keller dim = container_of(work, struct dim, work); 5746d8eb7ad5SJesse Brandeburg rc = (struct ice_ring_container *)dim->priv; 5747cdf1f1f1SJacob Keller 5748d8eb7ad5SJesse Brandeburg WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile)); 5749cdf1f1f1SJacob Keller 5750cdf1f1f1SJacob Keller /* look up the values in our local table */ 5751cdf1f1f1SJacob Keller itr = tx_profile[dim->profile_ix].itr; 5752cdf1f1f1SJacob Keller 5753d8eb7ad5SJesse Brandeburg ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim); 5754cdf1f1f1SJacob Keller ice_write_itr(rc, itr); 5755cdf1f1f1SJacob Keller 5756cdf1f1f1SJacob Keller dim->state = 
DIM_START_MEASURE; 5757cdf1f1f1SJacob Keller } 5758cdf1f1f1SJacob Keller 5759cdf1f1f1SJacob Keller static void ice_rx_dim_work(struct work_struct *work) 5760cdf1f1f1SJacob Keller { 5761cdf1f1f1SJacob Keller struct ice_ring_container *rc; 5762cdf1f1f1SJacob Keller struct dim *dim; 5763d8eb7ad5SJesse Brandeburg u16 itr; 5764cdf1f1f1SJacob Keller 5765cdf1f1f1SJacob Keller dim = container_of(work, struct dim, work); 5766d8eb7ad5SJesse Brandeburg rc = (struct ice_ring_container *)dim->priv; 5767cdf1f1f1SJacob Keller 5768d8eb7ad5SJesse Brandeburg WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile)); 5769cdf1f1f1SJacob Keller 5770cdf1f1f1SJacob Keller /* look up the values in our local table */ 5771cdf1f1f1SJacob Keller itr = rx_profile[dim->profile_ix].itr; 5772cdf1f1f1SJacob Keller 5773d8eb7ad5SJesse Brandeburg ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim); 5774cdf1f1f1SJacob Keller ice_write_itr(rc, itr); 5775cdf1f1f1SJacob Keller 5776cdf1f1f1SJacob Keller dim->state = DIM_START_MEASURE; 5777cdf1f1f1SJacob Keller } 5778cdf1f1f1SJacob Keller 5779d8eb7ad5SJesse Brandeburg #define ICE_DIM_DEFAULT_PROFILE_IX 1 5780d8eb7ad5SJesse Brandeburg 5781d8eb7ad5SJesse Brandeburg /** 5782d8eb7ad5SJesse Brandeburg * ice_init_moderation - set up interrupt moderation 5783d8eb7ad5SJesse Brandeburg * @q_vector: the vector containing rings to be configured 5784d8eb7ad5SJesse Brandeburg * 5785d8eb7ad5SJesse Brandeburg * Set up interrupt moderation registers, with the intent to do the right thing 5786d8eb7ad5SJesse Brandeburg * when called from reset or from probe, and whether or not dynamic moderation 5787d8eb7ad5SJesse Brandeburg * is enabled or not. Take special care to write all the registers in both 5788d8eb7ad5SJesse Brandeburg * dynamic moderation mode or not in order to make sure hardware is in a known 5789d8eb7ad5SJesse Brandeburg * state. 
5790d8eb7ad5SJesse Brandeburg */ 5791d8eb7ad5SJesse Brandeburg static void ice_init_moderation(struct ice_q_vector *q_vector) 5792d8eb7ad5SJesse Brandeburg { 5793d8eb7ad5SJesse Brandeburg struct ice_ring_container *rc; 5794d8eb7ad5SJesse Brandeburg bool tx_dynamic, rx_dynamic; 5795d8eb7ad5SJesse Brandeburg 5796d8eb7ad5SJesse Brandeburg rc = &q_vector->tx; 5797d8eb7ad5SJesse Brandeburg INIT_WORK(&rc->dim.work, ice_tx_dim_work); 5798d8eb7ad5SJesse Brandeburg rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 5799d8eb7ad5SJesse Brandeburg rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; 5800d8eb7ad5SJesse Brandeburg rc->dim.priv = rc; 5801d8eb7ad5SJesse Brandeburg tx_dynamic = ITR_IS_DYNAMIC(rc); 5802d8eb7ad5SJesse Brandeburg 5803d8eb7ad5SJesse Brandeburg /* set the initial TX ITR to match the above */ 5804d8eb7ad5SJesse Brandeburg ice_write_itr(rc, tx_dynamic ? 5805d8eb7ad5SJesse Brandeburg tx_profile[rc->dim.profile_ix].itr : rc->itr_setting); 5806d8eb7ad5SJesse Brandeburg 5807d8eb7ad5SJesse Brandeburg rc = &q_vector->rx; 5808d8eb7ad5SJesse Brandeburg INIT_WORK(&rc->dim.work, ice_rx_dim_work); 5809d8eb7ad5SJesse Brandeburg rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 5810d8eb7ad5SJesse Brandeburg rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; 5811d8eb7ad5SJesse Brandeburg rc->dim.priv = rc; 5812d8eb7ad5SJesse Brandeburg rx_dynamic = ITR_IS_DYNAMIC(rc); 5813d8eb7ad5SJesse Brandeburg 5814d8eb7ad5SJesse Brandeburg /* set the initial RX ITR to match the above */ 5815d8eb7ad5SJesse Brandeburg ice_write_itr(rc, rx_dynamic ? 
rx_profile[rc->dim.profile_ix].itr : 5816d8eb7ad5SJesse Brandeburg rc->itr_setting); 5817d8eb7ad5SJesse Brandeburg 5818d8eb7ad5SJesse Brandeburg ice_set_q_vector_intrl(q_vector); 5819d8eb7ad5SJesse Brandeburg } 5820d8eb7ad5SJesse Brandeburg 5821cdedef59SAnirudh Venkataramanan /** 58222b245cb2SAnirudh Venkataramanan * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI 58232b245cb2SAnirudh Venkataramanan * @vsi: the VSI being configured 58242b245cb2SAnirudh Venkataramanan */ 58252b245cb2SAnirudh Venkataramanan static void ice_napi_enable_all(struct ice_vsi *vsi) 58262b245cb2SAnirudh Venkataramanan { 58272b245cb2SAnirudh Venkataramanan int q_idx; 58282b245cb2SAnirudh Venkataramanan 58292b245cb2SAnirudh Venkataramanan if (!vsi->netdev) 58302b245cb2SAnirudh Venkataramanan return; 58312b245cb2SAnirudh Venkataramanan 58320c2561c8SBrett Creeley ice_for_each_q_vector(vsi, q_idx) { 5833eec90376SYoung Xiao struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 5834eec90376SYoung Xiao 5835d8eb7ad5SJesse Brandeburg ice_init_moderation(q_vector); 5836cdf1f1f1SJacob Keller 5837e72bba21SMaciej Fijalkowski if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) 5838eec90376SYoung Xiao napi_enable(&q_vector->napi); 5839eec90376SYoung Xiao } 58402b245cb2SAnirudh Venkataramanan } 58412b245cb2SAnirudh Venkataramanan 58422b245cb2SAnirudh Venkataramanan /** 5843cdedef59SAnirudh Venkataramanan * ice_up_complete - Finish the last steps of bringing up a connection 5844cdedef59SAnirudh Venkataramanan * @vsi: The VSI being configured 5845cdedef59SAnirudh Venkataramanan * 5846cdedef59SAnirudh Venkataramanan * Return 0 on success and negative value on error 5847cdedef59SAnirudh Venkataramanan */ 5848cdedef59SAnirudh Venkataramanan static int ice_up_complete(struct ice_vsi *vsi) 5849cdedef59SAnirudh Venkataramanan { 5850cdedef59SAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 5851cdedef59SAnirudh Venkataramanan int err; 5852cdedef59SAnirudh Venkataramanan 5853cdedef59SAnirudh 
Venkataramanan ice_vsi_cfg_msix(vsi); 5854cdedef59SAnirudh Venkataramanan 5855cdedef59SAnirudh Venkataramanan /* Enable only Rx rings, Tx rings were enabled by the FW when the 5856cdedef59SAnirudh Venkataramanan * Tx queue group list was configured and the context bits were 5857cdedef59SAnirudh Venkataramanan * programmed using ice_vsi_cfg_txqs 5858cdedef59SAnirudh Venkataramanan */ 585913a6233bSBrett Creeley err = ice_vsi_start_all_rx_rings(vsi); 5860cdedef59SAnirudh Venkataramanan if (err) 5861cdedef59SAnirudh Venkataramanan return err; 5862cdedef59SAnirudh Venkataramanan 5863e97fb1aeSAnirudh Venkataramanan clear_bit(ICE_VSI_DOWN, vsi->state); 58642b245cb2SAnirudh Venkataramanan ice_napi_enable_all(vsi); 5865cdedef59SAnirudh Venkataramanan ice_vsi_ena_irq(vsi); 5866cdedef59SAnirudh Venkataramanan 5867cdedef59SAnirudh Venkataramanan if (vsi->port_info && 5868cdedef59SAnirudh Venkataramanan (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && 5869cdedef59SAnirudh Venkataramanan vsi->netdev) { 5870cdedef59SAnirudh Venkataramanan ice_print_link_msg(vsi, true); 5871cdedef59SAnirudh Venkataramanan netif_tx_start_all_queues(vsi->netdev); 5872cdedef59SAnirudh Venkataramanan netif_carrier_on(vsi->netdev); 58733a749623SJacob Keller if (!ice_is_e810(&pf->hw)) 58743a749623SJacob Keller ice_ptp_link_change(pf, pf->hw.pf_id, true); 5875cdedef59SAnirudh Venkataramanan } 5876cdedef59SAnirudh Venkataramanan 587728dc1b86SJesse Brandeburg /* clear this now, and the first stats read will be used as baseline */ 587828dc1b86SJesse Brandeburg vsi->stat_offsets_loaded = false; 587928dc1b86SJesse Brandeburg 5880cdedef59SAnirudh Venkataramanan ice_service_task_schedule(pf); 5881cdedef59SAnirudh Venkataramanan 58821b5c19c7SBruce Allan return 0; 5883cdedef59SAnirudh Venkataramanan } 5884cdedef59SAnirudh Venkataramanan 5885cdedef59SAnirudh Venkataramanan /** 5886fcea6f3dSAnirudh Venkataramanan * ice_up - Bring the connection back up after being down 5887fcea6f3dSAnirudh 
Venkataramanan * @vsi: VSI being configured 5888fcea6f3dSAnirudh Venkataramanan */ 5889fcea6f3dSAnirudh Venkataramanan int ice_up(struct ice_vsi *vsi) 5890fcea6f3dSAnirudh Venkataramanan { 5891fcea6f3dSAnirudh Venkataramanan int err; 5892fcea6f3dSAnirudh Venkataramanan 5893fcea6f3dSAnirudh Venkataramanan err = ice_vsi_cfg(vsi); 5894fcea6f3dSAnirudh Venkataramanan if (!err) 5895fcea6f3dSAnirudh Venkataramanan err = ice_up_complete(vsi); 5896fcea6f3dSAnirudh Venkataramanan 5897fcea6f3dSAnirudh Venkataramanan return err; 5898fcea6f3dSAnirudh Venkataramanan } 5899fcea6f3dSAnirudh Venkataramanan 5900fcea6f3dSAnirudh Venkataramanan /** 5901fcea6f3dSAnirudh Venkataramanan * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring 5902e72bba21SMaciej Fijalkowski * @syncp: pointer to u64_stats_sync 5903e72bba21SMaciej Fijalkowski * @stats: stats that pkts and bytes count will be taken from 5904fcea6f3dSAnirudh Venkataramanan * @pkts: packets stats counter 5905fcea6f3dSAnirudh Venkataramanan * @bytes: bytes stats counter 5906fcea6f3dSAnirudh Venkataramanan * 5907fcea6f3dSAnirudh Venkataramanan * This function fetches stats from the ring considering the atomic operations 5908fcea6f3dSAnirudh Venkataramanan * that needs to be performed to read u64 values in 32 bit machine. 
5909fcea6f3dSAnirudh Venkataramanan */ 5910c8b7abddSBruce Allan static void 5911e72bba21SMaciej Fijalkowski ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats, 5912e72bba21SMaciej Fijalkowski u64 *pkts, u64 *bytes) 5913fcea6f3dSAnirudh Venkataramanan { 5914fcea6f3dSAnirudh Venkataramanan unsigned int start; 5915fcea6f3dSAnirudh Venkataramanan 5916fcea6f3dSAnirudh Venkataramanan do { 5917e72bba21SMaciej Fijalkowski start = u64_stats_fetch_begin_irq(syncp); 5918e72bba21SMaciej Fijalkowski *pkts = stats.pkts; 5919e72bba21SMaciej Fijalkowski *bytes = stats.bytes; 5920e72bba21SMaciej Fijalkowski } while (u64_stats_fetch_retry_irq(syncp, start)); 5921fcea6f3dSAnirudh Venkataramanan } 5922fcea6f3dSAnirudh Venkataramanan 5923fcea6f3dSAnirudh Venkataramanan /** 592449d358e0SMarta Plantykow * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters 592549d358e0SMarta Plantykow * @vsi: the VSI to be updated 59261a0f25a5SJesse Brandeburg * @vsi_stats: the stats struct to be updated 592749d358e0SMarta Plantykow * @rings: rings to work on 592849d358e0SMarta Plantykow * @count: number of rings 592949d358e0SMarta Plantykow */ 593049d358e0SMarta Plantykow static void 59311a0f25a5SJesse Brandeburg ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, 59321a0f25a5SJesse Brandeburg struct rtnl_link_stats64 *vsi_stats, 59331a0f25a5SJesse Brandeburg struct ice_tx_ring **rings, u16 count) 593449d358e0SMarta Plantykow { 593549d358e0SMarta Plantykow u16 i; 593649d358e0SMarta Plantykow 593749d358e0SMarta Plantykow for (i = 0; i < count; i++) { 5938e72bba21SMaciej Fijalkowski struct ice_tx_ring *ring; 5939e72bba21SMaciej Fijalkowski u64 pkts = 0, bytes = 0; 594049d358e0SMarta Plantykow 594149d358e0SMarta Plantykow ring = READ_ONCE(rings[i]); 5942e72bba21SMaciej Fijalkowski if (ring) 5943e72bba21SMaciej Fijalkowski ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes); 594449d358e0SMarta Plantykow vsi_stats->tx_packets += pkts; 
594549d358e0SMarta Plantykow vsi_stats->tx_bytes += bytes; 594649d358e0SMarta Plantykow vsi->tx_restart += ring->tx_stats.restart_q; 594749d358e0SMarta Plantykow vsi->tx_busy += ring->tx_stats.tx_busy; 594849d358e0SMarta Plantykow vsi->tx_linearize += ring->tx_stats.tx_linearize; 594949d358e0SMarta Plantykow } 595049d358e0SMarta Plantykow } 595149d358e0SMarta Plantykow 595249d358e0SMarta Plantykow /** 5953fcea6f3dSAnirudh Venkataramanan * ice_update_vsi_ring_stats - Update VSI stats counters 5954fcea6f3dSAnirudh Venkataramanan * @vsi: the VSI to be updated 5955fcea6f3dSAnirudh Venkataramanan */ 5956fcea6f3dSAnirudh Venkataramanan static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) 5957fcea6f3dSAnirudh Venkataramanan { 59581a0f25a5SJesse Brandeburg struct rtnl_link_stats64 *vsi_stats; 5959fcea6f3dSAnirudh Venkataramanan u64 pkts, bytes; 5960fcea6f3dSAnirudh Venkataramanan int i; 5961fcea6f3dSAnirudh Venkataramanan 59621a0f25a5SJesse Brandeburg vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC); 59631a0f25a5SJesse Brandeburg if (!vsi_stats) 59641a0f25a5SJesse Brandeburg return; 5965fcea6f3dSAnirudh Venkataramanan 5966fcea6f3dSAnirudh Venkataramanan /* reset non-netdev (extended) stats */ 5967fcea6f3dSAnirudh Venkataramanan vsi->tx_restart = 0; 5968fcea6f3dSAnirudh Venkataramanan vsi->tx_busy = 0; 5969fcea6f3dSAnirudh Venkataramanan vsi->tx_linearize = 0; 5970fcea6f3dSAnirudh Venkataramanan vsi->rx_buf_failed = 0; 5971fcea6f3dSAnirudh Venkataramanan vsi->rx_page_failed = 0; 5972fcea6f3dSAnirudh Venkataramanan 5973fcea6f3dSAnirudh Venkataramanan rcu_read_lock(); 5974fcea6f3dSAnirudh Venkataramanan 5975fcea6f3dSAnirudh Venkataramanan /* update Tx rings counters */ 59761a0f25a5SJesse Brandeburg ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, 59771a0f25a5SJesse Brandeburg vsi->num_txq); 5978fcea6f3dSAnirudh Venkataramanan 5979fcea6f3dSAnirudh Venkataramanan /* update Rx rings counters */ 5980fcea6f3dSAnirudh Venkataramanan ice_for_each_rxq(vsi, i) { 
5981e72bba21SMaciej Fijalkowski struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]); 5982b6b0501dSPaul M Stillwell Jr 5983e72bba21SMaciej Fijalkowski ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes); 5984fcea6f3dSAnirudh Venkataramanan vsi_stats->rx_packets += pkts; 5985fcea6f3dSAnirudh Venkataramanan vsi_stats->rx_bytes += bytes; 5986fcea6f3dSAnirudh Venkataramanan vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; 5987fcea6f3dSAnirudh Venkataramanan vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; 5988fcea6f3dSAnirudh Venkataramanan } 5989fcea6f3dSAnirudh Venkataramanan 599049d358e0SMarta Plantykow /* update XDP Tx rings counters */ 599149d358e0SMarta Plantykow if (ice_is_xdp_ena_vsi(vsi)) 59921a0f25a5SJesse Brandeburg ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, 599349d358e0SMarta Plantykow vsi->num_xdp_txq); 599449d358e0SMarta Plantykow 5995fcea6f3dSAnirudh Venkataramanan rcu_read_unlock(); 59961a0f25a5SJesse Brandeburg 59971a0f25a5SJesse Brandeburg vsi->net_stats.tx_packets = vsi_stats->tx_packets; 59981a0f25a5SJesse Brandeburg vsi->net_stats.tx_bytes = vsi_stats->tx_bytes; 59991a0f25a5SJesse Brandeburg vsi->net_stats.rx_packets = vsi_stats->rx_packets; 60001a0f25a5SJesse Brandeburg vsi->net_stats.rx_bytes = vsi_stats->rx_bytes; 60011a0f25a5SJesse Brandeburg 60021a0f25a5SJesse Brandeburg kfree(vsi_stats); 6003fcea6f3dSAnirudh Venkataramanan } 6004fcea6f3dSAnirudh Venkataramanan 6005fcea6f3dSAnirudh Venkataramanan /** 6006fcea6f3dSAnirudh Venkataramanan * ice_update_vsi_stats - Update VSI stats counters 6007fcea6f3dSAnirudh Venkataramanan * @vsi: the VSI to be updated 6008fcea6f3dSAnirudh Venkataramanan */ 60095a4a8673SBruce Allan void ice_update_vsi_stats(struct ice_vsi *vsi) 6010fcea6f3dSAnirudh Venkataramanan { 6011fcea6f3dSAnirudh Venkataramanan struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; 6012fcea6f3dSAnirudh Venkataramanan struct ice_eth_stats *cur_es = &vsi->eth_stats; 6013fcea6f3dSAnirudh 
Venkataramanan struct ice_pf *pf = vsi->back; 6014fcea6f3dSAnirudh Venkataramanan 6015e97fb1aeSAnirudh Venkataramanan if (test_bit(ICE_VSI_DOWN, vsi->state) || 60167e408e07SAnirudh Venkataramanan test_bit(ICE_CFG_BUSY, pf->state)) 6017fcea6f3dSAnirudh Venkataramanan return; 6018fcea6f3dSAnirudh Venkataramanan 6019fcea6f3dSAnirudh Venkataramanan /* get stats as recorded by Tx/Rx rings */ 6020fcea6f3dSAnirudh Venkataramanan ice_update_vsi_ring_stats(vsi); 6021fcea6f3dSAnirudh Venkataramanan 6022fcea6f3dSAnirudh Venkataramanan /* get VSI stats as recorded by the hardware */ 6023fcea6f3dSAnirudh Venkataramanan ice_update_eth_stats(vsi); 6024fcea6f3dSAnirudh Venkataramanan 6025fcea6f3dSAnirudh Venkataramanan cur_ns->tx_errors = cur_es->tx_errors; 602651fe27e1SAnirudh Venkataramanan cur_ns->rx_dropped = cur_es->rx_discards; 6027fcea6f3dSAnirudh Venkataramanan cur_ns->tx_dropped = cur_es->tx_discards; 6028fcea6f3dSAnirudh Venkataramanan cur_ns->multicast = cur_es->rx_multicast; 6029fcea6f3dSAnirudh Venkataramanan 6030fcea6f3dSAnirudh Venkataramanan /* update some more netdev stats if this is main VSI */ 6031fcea6f3dSAnirudh Venkataramanan if (vsi->type == ICE_VSI_PF) { 6032fcea6f3dSAnirudh Venkataramanan cur_ns->rx_crc_errors = pf->stats.crc_errors; 6033fcea6f3dSAnirudh Venkataramanan cur_ns->rx_errors = pf->stats.crc_errors + 60344f1fe43cSBrett Creeley pf->stats.illegal_bytes + 60354f1fe43cSBrett Creeley pf->stats.rx_len_errors + 60364f1fe43cSBrett Creeley pf->stats.rx_undersize + 60374f1fe43cSBrett Creeley pf->hw_csum_rx_error + 60384f1fe43cSBrett Creeley pf->stats.rx_jabber + 60394f1fe43cSBrett Creeley pf->stats.rx_fragments + 60404f1fe43cSBrett Creeley pf->stats.rx_oversize; 6041fcea6f3dSAnirudh Venkataramanan cur_ns->rx_length_errors = pf->stats.rx_len_errors; 604256923ab6SBrett Creeley /* record drops from the port level */ 604356923ab6SBrett Creeley cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; 6044fcea6f3dSAnirudh Venkataramanan } 6045fcea6f3dSAnirudh 
Venkataramanan } 6046fcea6f3dSAnirudh Venkataramanan 6047fcea6f3dSAnirudh Venkataramanan /** 6048fcea6f3dSAnirudh Venkataramanan * ice_update_pf_stats - Update PF port stats counters 6049fcea6f3dSAnirudh Venkataramanan * @pf: PF whose stats needs to be updated 6050fcea6f3dSAnirudh Venkataramanan */ 60515a4a8673SBruce Allan void ice_update_pf_stats(struct ice_pf *pf) 6052fcea6f3dSAnirudh Venkataramanan { 6053fcea6f3dSAnirudh Venkataramanan struct ice_hw_port_stats *prev_ps, *cur_ps; 6054fcea6f3dSAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 60554ab95646SHenry Tieman u16 fd_ctr_base; 60569e7a5d17SUsha Ketineni u8 port; 6057fcea6f3dSAnirudh Venkataramanan 60589e7a5d17SUsha Ketineni port = hw->port_info->lport; 6059fcea6f3dSAnirudh Venkataramanan prev_ps = &pf->stats_prev; 6060fcea6f3dSAnirudh Venkataramanan cur_ps = &pf->stats; 6061fcea6f3dSAnirudh Venkataramanan 60629e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, 606336517fd3SJacob Keller &prev_ps->eth.rx_bytes, 6064fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.rx_bytes); 6065fcea6f3dSAnirudh Venkataramanan 60669e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, 606736517fd3SJacob Keller &prev_ps->eth.rx_unicast, 6068fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.rx_unicast); 6069fcea6f3dSAnirudh Venkataramanan 60709e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, 607136517fd3SJacob Keller &prev_ps->eth.rx_multicast, 6072fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.rx_multicast); 6073fcea6f3dSAnirudh Venkataramanan 60749e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, 607536517fd3SJacob Keller &prev_ps->eth.rx_broadcast, 6076fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.rx_broadcast); 6077fcea6f3dSAnirudh Venkataramanan 607856923ab6SBrett Creeley ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, 607956923ab6SBrett Creeley &prev_ps->eth.rx_discards, 608056923ab6SBrett 
Creeley &cur_ps->eth.rx_discards); 608156923ab6SBrett Creeley 60829e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, 608336517fd3SJacob Keller &prev_ps->eth.tx_bytes, 6084fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.tx_bytes); 6085fcea6f3dSAnirudh Venkataramanan 60869e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, 608736517fd3SJacob Keller &prev_ps->eth.tx_unicast, 6088fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.tx_unicast); 6089fcea6f3dSAnirudh Venkataramanan 60909e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, 609136517fd3SJacob Keller &prev_ps->eth.tx_multicast, 6092fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.tx_multicast); 6093fcea6f3dSAnirudh Venkataramanan 60949e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, 609536517fd3SJacob Keller &prev_ps->eth.tx_broadcast, 6096fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.tx_broadcast); 6097fcea6f3dSAnirudh Venkataramanan 60989e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, 6099fcea6f3dSAnirudh Venkataramanan &prev_ps->tx_dropped_link_down, 6100fcea6f3dSAnirudh Venkataramanan &cur_ps->tx_dropped_link_down); 6101fcea6f3dSAnirudh Venkataramanan 61029e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, 610336517fd3SJacob Keller &prev_ps->rx_size_64, &cur_ps->rx_size_64); 6104fcea6f3dSAnirudh Venkataramanan 61059e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, 610636517fd3SJacob Keller &prev_ps->rx_size_127, &cur_ps->rx_size_127); 6107fcea6f3dSAnirudh Venkataramanan 61089e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, 610936517fd3SJacob Keller &prev_ps->rx_size_255, &cur_ps->rx_size_255); 6110fcea6f3dSAnirudh Venkataramanan 61119e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, 611236517fd3SJacob 
Keller &prev_ps->rx_size_511, &cur_ps->rx_size_511); 6113fcea6f3dSAnirudh Venkataramanan 61149e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, 6115fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); 6116fcea6f3dSAnirudh Venkataramanan 61179e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, 6118fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); 6119fcea6f3dSAnirudh Venkataramanan 61209e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, 6121fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_size_big, &cur_ps->rx_size_big); 6122fcea6f3dSAnirudh Venkataramanan 61239e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, 612436517fd3SJacob Keller &prev_ps->tx_size_64, &cur_ps->tx_size_64); 6125fcea6f3dSAnirudh Venkataramanan 61269e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, 612736517fd3SJacob Keller &prev_ps->tx_size_127, &cur_ps->tx_size_127); 6128fcea6f3dSAnirudh Venkataramanan 61299e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, 613036517fd3SJacob Keller &prev_ps->tx_size_255, &cur_ps->tx_size_255); 6131fcea6f3dSAnirudh Venkataramanan 61329e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, 613336517fd3SJacob Keller &prev_ps->tx_size_511, &cur_ps->tx_size_511); 6134fcea6f3dSAnirudh Venkataramanan 61359e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, 6136fcea6f3dSAnirudh Venkataramanan &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); 6137fcea6f3dSAnirudh Venkataramanan 61389e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, 6139fcea6f3dSAnirudh Venkataramanan &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); 6140fcea6f3dSAnirudh Venkataramanan 61419e7a5d17SUsha Ketineni ice_stat_update40(hw, 
GLPRT_PTC9522L(port), pf->stat_prev_loaded, 6142fcea6f3dSAnirudh Venkataramanan &prev_ps->tx_size_big, &cur_ps->tx_size_big); 6143fcea6f3dSAnirudh Venkataramanan 61444ab95646SHenry Tieman fd_ctr_base = hw->fd_ctr_base; 61454ab95646SHenry Tieman 61464ab95646SHenry Tieman ice_stat_update40(hw, 61474ab95646SHenry Tieman GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), 61484ab95646SHenry Tieman pf->stat_prev_loaded, &prev_ps->fd_sb_match, 61494ab95646SHenry Tieman &cur_ps->fd_sb_match); 61509e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, 6151fcea6f3dSAnirudh Venkataramanan &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); 6152fcea6f3dSAnirudh Venkataramanan 61539e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, 6154fcea6f3dSAnirudh Venkataramanan &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); 6155fcea6f3dSAnirudh Venkataramanan 61569e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, 6157fcea6f3dSAnirudh Venkataramanan &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); 6158fcea6f3dSAnirudh Venkataramanan 61599e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, 6160fcea6f3dSAnirudh Venkataramanan &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); 6161fcea6f3dSAnirudh Venkataramanan 61624b0fdcebSAnirudh Venkataramanan ice_update_dcb_stats(pf); 61634b0fdcebSAnirudh Venkataramanan 61649e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, 6165fcea6f3dSAnirudh Venkataramanan &prev_ps->crc_errors, &cur_ps->crc_errors); 6166fcea6f3dSAnirudh Venkataramanan 61679e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, 6168fcea6f3dSAnirudh Venkataramanan &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); 6169fcea6f3dSAnirudh Venkataramanan 61709e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, 6171fcea6f3dSAnirudh Venkataramanan 
&prev_ps->mac_local_faults, 6172fcea6f3dSAnirudh Venkataramanan &cur_ps->mac_local_faults); 6173fcea6f3dSAnirudh Venkataramanan 61749e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, 6175fcea6f3dSAnirudh Venkataramanan &prev_ps->mac_remote_faults, 6176fcea6f3dSAnirudh Venkataramanan &cur_ps->mac_remote_faults); 6177fcea6f3dSAnirudh Venkataramanan 61789e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, 6179fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); 6180fcea6f3dSAnirudh Venkataramanan 61819e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, 6182fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_undersize, &cur_ps->rx_undersize); 6183fcea6f3dSAnirudh Venkataramanan 61849e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, 6185fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_fragments, &cur_ps->rx_fragments); 6186fcea6f3dSAnirudh Venkataramanan 61879e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, 6188fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_oversize, &cur_ps->rx_oversize); 6189fcea6f3dSAnirudh Venkataramanan 61909e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, 6191fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_jabber, &cur_ps->rx_jabber); 6192fcea6f3dSAnirudh Venkataramanan 61934ab95646SHenry Tieman cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 
1 : 0; 61944ab95646SHenry Tieman 6195fcea6f3dSAnirudh Venkataramanan pf->stat_prev_loaded = true; 6196fcea6f3dSAnirudh Venkataramanan } 6197fcea6f3dSAnirudh Venkataramanan 6198fcea6f3dSAnirudh Venkataramanan /** 6199fcea6f3dSAnirudh Venkataramanan * ice_get_stats64 - get statistics for network device structure 6200fcea6f3dSAnirudh Venkataramanan * @netdev: network interface device structure 6201fcea6f3dSAnirudh Venkataramanan * @stats: main device statistics structure 6202fcea6f3dSAnirudh Venkataramanan */ 6203fcea6f3dSAnirudh Venkataramanan static 6204fcea6f3dSAnirudh Venkataramanan void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) 6205fcea6f3dSAnirudh Venkataramanan { 6206fcea6f3dSAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 6207fcea6f3dSAnirudh Venkataramanan struct rtnl_link_stats64 *vsi_stats; 6208fcea6f3dSAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 6209fcea6f3dSAnirudh Venkataramanan 6210fcea6f3dSAnirudh Venkataramanan vsi_stats = &vsi->net_stats; 6211fcea6f3dSAnirudh Venkataramanan 62123d57fd10SDave Ertman if (!vsi->num_txq || !vsi->num_rxq) 6213fcea6f3dSAnirudh Venkataramanan return; 62143d57fd10SDave Ertman 6215fcea6f3dSAnirudh Venkataramanan /* netdev packet/byte stats come from ring counter. These are obtained 6216fcea6f3dSAnirudh Venkataramanan * by summing up ring counters (done by ice_update_vsi_ring_stats). 62173d57fd10SDave Ertman * But, only call the update routine and read the registers if VSI is 62183d57fd10SDave Ertman * not down. 
6219fcea6f3dSAnirudh Venkataramanan */ 6220e97fb1aeSAnirudh Venkataramanan if (!test_bit(ICE_VSI_DOWN, vsi->state)) 6221fcea6f3dSAnirudh Venkataramanan ice_update_vsi_ring_stats(vsi); 6222fcea6f3dSAnirudh Venkataramanan stats->tx_packets = vsi_stats->tx_packets; 6223fcea6f3dSAnirudh Venkataramanan stats->tx_bytes = vsi_stats->tx_bytes; 6224fcea6f3dSAnirudh Venkataramanan stats->rx_packets = vsi_stats->rx_packets; 6225fcea6f3dSAnirudh Venkataramanan stats->rx_bytes = vsi_stats->rx_bytes; 6226fcea6f3dSAnirudh Venkataramanan 6227fcea6f3dSAnirudh Venkataramanan /* The rest of the stats can be read from the hardware but instead we 6228fcea6f3dSAnirudh Venkataramanan * just return values that the watchdog task has already obtained from 6229fcea6f3dSAnirudh Venkataramanan * the hardware. 6230fcea6f3dSAnirudh Venkataramanan */ 6231fcea6f3dSAnirudh Venkataramanan stats->multicast = vsi_stats->multicast; 6232fcea6f3dSAnirudh Venkataramanan stats->tx_errors = vsi_stats->tx_errors; 6233fcea6f3dSAnirudh Venkataramanan stats->tx_dropped = vsi_stats->tx_dropped; 6234fcea6f3dSAnirudh Venkataramanan stats->rx_errors = vsi_stats->rx_errors; 6235fcea6f3dSAnirudh Venkataramanan stats->rx_dropped = vsi_stats->rx_dropped; 6236fcea6f3dSAnirudh Venkataramanan stats->rx_crc_errors = vsi_stats->rx_crc_errors; 6237fcea6f3dSAnirudh Venkataramanan stats->rx_length_errors = vsi_stats->rx_length_errors; 6238fcea6f3dSAnirudh Venkataramanan } 6239fcea6f3dSAnirudh Venkataramanan 6240fcea6f3dSAnirudh Venkataramanan /** 62412b245cb2SAnirudh Venkataramanan * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI 62422b245cb2SAnirudh Venkataramanan * @vsi: VSI having NAPI disabled 62432b245cb2SAnirudh Venkataramanan */ 62442b245cb2SAnirudh Venkataramanan static void ice_napi_disable_all(struct ice_vsi *vsi) 62452b245cb2SAnirudh Venkataramanan { 62462b245cb2SAnirudh Venkataramanan int q_idx; 62472b245cb2SAnirudh Venkataramanan 62482b245cb2SAnirudh Venkataramanan if (!vsi->netdev) 
62492b245cb2SAnirudh Venkataramanan return; 62502b245cb2SAnirudh Venkataramanan 62510c2561c8SBrett Creeley ice_for_each_q_vector(vsi, q_idx) { 6252eec90376SYoung Xiao struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 6253eec90376SYoung Xiao 6254e72bba21SMaciej Fijalkowski if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) 6255eec90376SYoung Xiao napi_disable(&q_vector->napi); 6256cdf1f1f1SJacob Keller 6257cdf1f1f1SJacob Keller cancel_work_sync(&q_vector->tx.dim.work); 6258cdf1f1f1SJacob Keller cancel_work_sync(&q_vector->rx.dim.work); 6259eec90376SYoung Xiao } 62602b245cb2SAnirudh Venkataramanan } 62612b245cb2SAnirudh Venkataramanan 62622b245cb2SAnirudh Venkataramanan /** 6263cdedef59SAnirudh Venkataramanan * ice_down - Shutdown the connection 6264cdedef59SAnirudh Venkataramanan * @vsi: The VSI being stopped 626521c6e36bSJesse Brandeburg * 626621c6e36bSJesse Brandeburg * Caller of this function is expected to set the vsi->state ICE_DOWN bit 6267cdedef59SAnirudh Venkataramanan */ 6268fcea6f3dSAnirudh Venkataramanan int ice_down(struct ice_vsi *vsi) 6269cdedef59SAnirudh Venkataramanan { 6270ab4ab73fSBruce Allan int i, tx_err, rx_err, link_err = 0; 6271cdedef59SAnirudh Venkataramanan 627221c6e36bSJesse Brandeburg WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); 627321c6e36bSJesse Brandeburg 6274b3be918dSGrzegorz Nitka if (vsi->netdev && vsi->type == ICE_VSI_PF) { 62753a749623SJacob Keller if (!ice_is_e810(&vsi->back->hw)) 62763a749623SJacob Keller ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); 6277cdedef59SAnirudh Venkataramanan netif_carrier_off(vsi->netdev); 6278cdedef59SAnirudh Venkataramanan netif_tx_disable(vsi->netdev); 6279b3be918dSGrzegorz Nitka } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { 6280b3be918dSGrzegorz Nitka ice_eswitch_stop_all_tx_queues(vsi->back); 6281cdedef59SAnirudh Venkataramanan } 6282cdedef59SAnirudh Venkataramanan 6283cdedef59SAnirudh Venkataramanan ice_vsi_dis_irq(vsi); 628403f7a986SAnirudh Venkataramanan 
628503f7a986SAnirudh Venkataramanan tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); 628672adf242SAnirudh Venkataramanan if (tx_err) 628719cce2c6SAnirudh Venkataramanan netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", 628872adf242SAnirudh Venkataramanan vsi->vsi_num, tx_err); 6289efc2214bSMaciej Fijalkowski if (!tx_err && ice_is_xdp_ena_vsi(vsi)) { 6290efc2214bSMaciej Fijalkowski tx_err = ice_vsi_stop_xdp_tx_rings(vsi); 6291efc2214bSMaciej Fijalkowski if (tx_err) 629219cce2c6SAnirudh Venkataramanan netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", 6293efc2214bSMaciej Fijalkowski vsi->vsi_num, tx_err); 6294efc2214bSMaciej Fijalkowski } 629572adf242SAnirudh Venkataramanan 629613a6233bSBrett Creeley rx_err = ice_vsi_stop_all_rx_rings(vsi); 629772adf242SAnirudh Venkataramanan if (rx_err) 629819cce2c6SAnirudh Venkataramanan netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", 629972adf242SAnirudh Venkataramanan vsi->vsi_num, rx_err); 630072adf242SAnirudh Venkataramanan 63012b245cb2SAnirudh Venkataramanan ice_napi_disable_all(vsi); 6302cdedef59SAnirudh Venkataramanan 6303ab4ab73fSBruce Allan if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { 6304b6f934f0SBrett Creeley link_err = ice_force_phys_link_state(vsi, false); 6305b6f934f0SBrett Creeley if (link_err) 630619cce2c6SAnirudh Venkataramanan netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", 6307b6f934f0SBrett Creeley vsi->vsi_num, link_err); 6308ab4ab73fSBruce Allan } 6309b6f934f0SBrett Creeley 6310cdedef59SAnirudh Venkataramanan ice_for_each_txq(vsi, i) 6311cdedef59SAnirudh Venkataramanan ice_clean_tx_ring(vsi->tx_rings[i]); 6312cdedef59SAnirudh Venkataramanan 6313cdedef59SAnirudh Venkataramanan ice_for_each_rxq(vsi, i) 6314cdedef59SAnirudh Venkataramanan ice_clean_rx_ring(vsi->rx_rings[i]); 6315cdedef59SAnirudh Venkataramanan 6316b6f934f0SBrett Creeley if (tx_err || rx_err || link_err) { 631719cce2c6SAnirudh 
Venkataramanan netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", 6318cdedef59SAnirudh Venkataramanan vsi->vsi_num, vsi->vsw->sw_id); 631972adf242SAnirudh Venkataramanan return -EIO; 632072adf242SAnirudh Venkataramanan } 632172adf242SAnirudh Venkataramanan 632272adf242SAnirudh Venkataramanan return 0; 6323cdedef59SAnirudh Venkataramanan } 6324cdedef59SAnirudh Venkataramanan 6325cdedef59SAnirudh Venkataramanan /** 6326cdedef59SAnirudh Venkataramanan * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources 6327cdedef59SAnirudh Venkataramanan * @vsi: VSI having resources allocated 6328cdedef59SAnirudh Venkataramanan * 6329cdedef59SAnirudh Venkataramanan * Return 0 on success, negative on failure 6330cdedef59SAnirudh Venkataramanan */ 63310e674aebSAnirudh Venkataramanan int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 6332cdedef59SAnirudh Venkataramanan { 6333dab0588fSJesse Brandeburg int i, err = 0; 6334cdedef59SAnirudh Venkataramanan 6335cdedef59SAnirudh Venkataramanan if (!vsi->num_txq) { 63369a946843SAnirudh Venkataramanan dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", 6337cdedef59SAnirudh Venkataramanan vsi->vsi_num); 6338cdedef59SAnirudh Venkataramanan return -EINVAL; 6339cdedef59SAnirudh Venkataramanan } 6340cdedef59SAnirudh Venkataramanan 6341cdedef59SAnirudh Venkataramanan ice_for_each_txq(vsi, i) { 6342e72bba21SMaciej Fijalkowski struct ice_tx_ring *ring = vsi->tx_rings[i]; 6343eb0ee8abSMichal Swiatkowski 6344eb0ee8abSMichal Swiatkowski if (!ring) 6345eb0ee8abSMichal Swiatkowski return -EINVAL; 6346eb0ee8abSMichal Swiatkowski 63471c54c839SGrzegorz Nitka if (vsi->netdev) 6348eb0ee8abSMichal Swiatkowski ring->netdev = vsi->netdev; 6349eb0ee8abSMichal Swiatkowski err = ice_setup_tx_ring(ring); 6350cdedef59SAnirudh Venkataramanan if (err) 6351cdedef59SAnirudh Venkataramanan break; 6352cdedef59SAnirudh Venkataramanan } 6353cdedef59SAnirudh Venkataramanan 6354cdedef59SAnirudh Venkataramanan return err; 6355cdedef59SAnirudh 
Venkataramanan } 6356cdedef59SAnirudh Venkataramanan 6357cdedef59SAnirudh Venkataramanan /** 6358cdedef59SAnirudh Venkataramanan * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources 6359cdedef59SAnirudh Venkataramanan * @vsi: VSI having resources allocated 6360cdedef59SAnirudh Venkataramanan * 6361cdedef59SAnirudh Venkataramanan * Return 0 on success, negative on failure 6362cdedef59SAnirudh Venkataramanan */ 63630e674aebSAnirudh Venkataramanan int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 6364cdedef59SAnirudh Venkataramanan { 6365dab0588fSJesse Brandeburg int i, err = 0; 6366cdedef59SAnirudh Venkataramanan 6367cdedef59SAnirudh Venkataramanan if (!vsi->num_rxq) { 63689a946843SAnirudh Venkataramanan dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", 6369cdedef59SAnirudh Venkataramanan vsi->vsi_num); 6370cdedef59SAnirudh Venkataramanan return -EINVAL; 6371cdedef59SAnirudh Venkataramanan } 6372cdedef59SAnirudh Venkataramanan 6373cdedef59SAnirudh Venkataramanan ice_for_each_rxq(vsi, i) { 6374e72bba21SMaciej Fijalkowski struct ice_rx_ring *ring = vsi->rx_rings[i]; 6375eb0ee8abSMichal Swiatkowski 6376eb0ee8abSMichal Swiatkowski if (!ring) 6377eb0ee8abSMichal Swiatkowski return -EINVAL; 6378eb0ee8abSMichal Swiatkowski 63791c54c839SGrzegorz Nitka if (vsi->netdev) 6380eb0ee8abSMichal Swiatkowski ring->netdev = vsi->netdev; 6381eb0ee8abSMichal Swiatkowski err = ice_setup_rx_ring(ring); 6382cdedef59SAnirudh Venkataramanan if (err) 6383cdedef59SAnirudh Venkataramanan break; 6384cdedef59SAnirudh Venkataramanan } 6385cdedef59SAnirudh Venkataramanan 6386cdedef59SAnirudh Venkataramanan return err; 6387cdedef59SAnirudh Venkataramanan } 6388cdedef59SAnirudh Venkataramanan 6389cdedef59SAnirudh Venkataramanan /** 6390148beb61SHenry Tieman * ice_vsi_open_ctrl - open control VSI for use 6391148beb61SHenry Tieman * @vsi: the VSI to open 6392148beb61SHenry Tieman * 6393148beb61SHenry Tieman * Initialization of the Control VSI 6394148beb61SHenry Tieman * 
/**
 * ice_vsi_open_ctrl - open control VSI for use
 * @vsi: the VSI to open
 *
 * Initialization of the Control VSI
 *
 * Returns 0 on success, negative value on error
 */
int ice_vsi_open_ctrl(struct ice_vsi *vsi)
{
	char int_name[ICE_INT_NAME_STR_LEN];
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	/* allocate descriptors */
	err = ice_vsi_setup_tx_rings(vsi);
	if (err)
		goto err_setup_tx;

	err = ice_vsi_setup_rx_rings(vsi);
	if (err)
		goto err_setup_rx;

	/* configure the VSI in hardware before requesting interrupts */
	err = ice_vsi_cfg(vsi);
	if (err)
		goto err_setup_rx;

	/* control VSI has no netdev, so name the vector after the device */
	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
		 dev_driver_string(dev), dev_name(dev));
	err = ice_vsi_req_irq_msix(vsi, int_name);
	if (err)
		goto err_setup_rx;

	ice_vsi_cfg_msix(vsi);

	err = ice_vsi_start_all_rx_rings(vsi);
	if (err)
		goto err_up_complete;

	/* mark the VSI up and allow interrupts from it */
	clear_bit(ICE_VSI_DOWN, vsi->state);
	ice_vsi_ena_irq(vsi);

	return 0;

	/* unwind in reverse order of the setup above; labels fall through so
	 * each later failure also releases the earlier resources
	 */
err_up_complete:
	ice_down(vsi);
err_setup_rx:
	ice_vsi_free_rx_rings(vsi);
err_setup_tx:
	ice_vsi_free_tx_rings(vsi);

	return err;
}
/**
 * ice_vsi_open - Called when a network interface is made active
 * @vsi: the VSI to open
 *
 * Initialization of the VSI
 *
 * Returns 0 on success, negative value on error
 */
int ice_vsi_open(struct ice_vsi *vsi)
{
	char int_name[ICE_INT_NAME_STR_LEN];
	struct ice_pf *pf = vsi->back;
	int err;

	/* allocate descriptors */
	err = ice_vsi_setup_tx_rings(vsi);
	if (err)
		goto err_setup_tx;

	err = ice_vsi_setup_rx_rings(vsi);
	if (err)
		goto err_setup_rx;

	/* program the VSI configuration into hardware */
	err = ice_vsi_cfg(vsi);
	if (err)
		goto err_setup_rx;

	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
	err = ice_vsi_req_irq_msix(vsi, int_name);
	if (err)
		goto err_setup_rx;

	/* only the PF VSI exposes its queues to the network stack */
	if (vsi->type == ICE_VSI_PF) {
		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
		if (err)
			goto err_set_qs;

		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
		if (err)
			goto err_set_qs;
	}

	err = ice_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

	/* unwind in reverse order of setup; labels intentionally fall
	 * through so later failures release earlier resources too
	 */
err_up_complete:
	ice_down(vsi);
err_set_qs:
	ice_vsi_free_irq(vsi);
err_setup_rx:
	ice_vsi_free_rx_rings(vsi);
err_setup_tx:
	ice_vsi_free_tx_rings(vsi);

	return err;
}
Venkataramanan 65150f9d5027SAnirudh Venkataramanan if (!pf->vsi) 65160f9d5027SAnirudh Venkataramanan return; 65170f9d5027SAnirudh Venkataramanan 651880ed404aSBrett Creeley ice_for_each_vsi(pf, i) { 65190f9d5027SAnirudh Venkataramanan if (!pf->vsi[i]) 65200f9d5027SAnirudh Venkataramanan continue; 65210f9d5027SAnirudh Venkataramanan 6522fbc7b27aSKiran Patil if (pf->vsi[i]->type == ICE_VSI_CHNL) 6523fbc7b27aSKiran Patil continue; 6524fbc7b27aSKiran Patil 65250f9d5027SAnirudh Venkataramanan err = ice_vsi_release(pf->vsi[i]); 65260f9d5027SAnirudh Venkataramanan if (err) 652719cce2c6SAnirudh Venkataramanan dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", 65280f9d5027SAnirudh Venkataramanan i, err, pf->vsi[i]->vsi_num); 65290f9d5027SAnirudh Venkataramanan } 65300f9d5027SAnirudh Venkataramanan } 65310f9d5027SAnirudh Venkataramanan 65320f9d5027SAnirudh Venkataramanan /** 6533462acf6aSTony Nguyen * ice_vsi_rebuild_by_type - Rebuild VSI of a given type 6534462acf6aSTony Nguyen * @pf: pointer to the PF instance 6535462acf6aSTony Nguyen * @type: VSI type to rebuild 6536462acf6aSTony Nguyen * 6537462acf6aSTony Nguyen * Iterates through the pf->vsi array and rebuilds VSIs of the requested type 65380f9d5027SAnirudh Venkataramanan */ 6539462acf6aSTony Nguyen static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) 65400f9d5027SAnirudh Venkataramanan { 65414015d11eSBrett Creeley struct device *dev = ice_pf_to_dev(pf); 6542462acf6aSTony Nguyen int i, err; 65430f9d5027SAnirudh Venkataramanan 654480ed404aSBrett Creeley ice_for_each_vsi(pf, i) { 65454425e053SKrzysztof Kazimierczak struct ice_vsi *vsi = pf->vsi[i]; 65460f9d5027SAnirudh Venkataramanan 6547462acf6aSTony Nguyen if (!vsi || vsi->type != type) 65480f9d5027SAnirudh Venkataramanan continue; 65490f9d5027SAnirudh Venkataramanan 6550462acf6aSTony Nguyen /* rebuild the VSI */ 655187324e74SHenry Tieman err = ice_vsi_rebuild(vsi, true); 65520f9d5027SAnirudh Venkataramanan if 
/**
 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
 * @pf: pointer to the PF instance
 * @type: VSI type to rebuild
 *
 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type.
 * For each matching VSI the sequence is: rebuild, replay filters, re-read the
 * HW VSI number, then enable. The first failure aborts the whole scan.
 */
static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct device *dev = ice_pf_to_dev(pf);
	int i, err;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];

		/* only operate on VSIs of the requested type */
		if (!vsi || vsi->type != type)
			continue;

		/* rebuild the VSI */
		err = ice_vsi_rebuild(vsi, true);
		if (err) {
			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
				err, vsi->idx, ice_vsi_type_str(type));
			return err;
		}

		/* replay filters for the VSI */
		err = ice_replay_vsi(&pf->hw, vsi->idx);
		if (err) {
			dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
				err, vsi->idx, ice_vsi_type_str(type));
			return err;
		}

		/* Re-map HW VSI number, using VSI handle that has been
		 * previously validated in ice_replay_vsi() call above
		 */
		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);

		/* enable the VSI */
		err = ice_ena_vsi(vsi, false);
		if (err) {
			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
				err, vsi->idx, ice_vsi_type_str(type));
			return err;
		}

		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
			 ice_vsi_type_str(type));
	}

	return 0;
}
VSI index %d, type %s\n", vsi->idx, 65804015d11eSBrett Creeley ice_vsi_type_str(type)); 6581462acf6aSTony Nguyen } 6582462acf6aSTony Nguyen 6583334cb062SAnirudh Venkataramanan return 0; 6584334cb062SAnirudh Venkataramanan } 6585334cb062SAnirudh Venkataramanan 6586334cb062SAnirudh Venkataramanan /** 6587462acf6aSTony Nguyen * ice_update_pf_netdev_link - Update PF netdev link status 6588462acf6aSTony Nguyen * @pf: pointer to the PF instance 6589462acf6aSTony Nguyen */ 6590462acf6aSTony Nguyen static void ice_update_pf_netdev_link(struct ice_pf *pf) 6591462acf6aSTony Nguyen { 6592462acf6aSTony Nguyen bool link_up; 6593462acf6aSTony Nguyen int i; 6594462acf6aSTony Nguyen 6595462acf6aSTony Nguyen ice_for_each_vsi(pf, i) { 6596462acf6aSTony Nguyen struct ice_vsi *vsi = pf->vsi[i]; 6597462acf6aSTony Nguyen 6598462acf6aSTony Nguyen if (!vsi || vsi->type != ICE_VSI_PF) 6599462acf6aSTony Nguyen return; 6600462acf6aSTony Nguyen 6601462acf6aSTony Nguyen ice_get_link_status(pf->vsi[i]->port_info, &link_up); 6602462acf6aSTony Nguyen if (link_up) { 6603462acf6aSTony Nguyen netif_carrier_on(pf->vsi[i]->netdev); 6604462acf6aSTony Nguyen netif_tx_wake_all_queues(pf->vsi[i]->netdev); 6605462acf6aSTony Nguyen } else { 6606462acf6aSTony Nguyen netif_carrier_off(pf->vsi[i]->netdev); 6607462acf6aSTony Nguyen netif_tx_stop_all_queues(pf->vsi[i]->netdev); 6608462acf6aSTony Nguyen } 6609462acf6aSTony Nguyen } 6610462acf6aSTony Nguyen } 6611462acf6aSTony Nguyen 6612462acf6aSTony Nguyen /** 66130b28b702SAnirudh Venkataramanan * ice_rebuild - rebuild after reset 66142f2da36eSAnirudh Venkataramanan * @pf: PF to rebuild 6615462acf6aSTony Nguyen * @reset_type: type of reset 661612bb018cSBrett Creeley * 661712bb018cSBrett Creeley * Do not rebuild VF VSI in this flow because that is already handled via 661812bb018cSBrett Creeley * ice_reset_all_vfs(). This is because requirements for resetting a VF after a 661912bb018cSBrett Creeley * PFR/CORER/GLOBER/etc. are different than the normal flow. 
Also, we don't want 662012bb018cSBrett Creeley * to reset/rebuild all the VF VSI twice. 66210b28b702SAnirudh Venkataramanan */ 6622462acf6aSTony Nguyen static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) 66230b28b702SAnirudh Venkataramanan { 66244015d11eSBrett Creeley struct device *dev = ice_pf_to_dev(pf); 66250b28b702SAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 6626462acf6aSTony Nguyen int err; 66270b28b702SAnirudh Venkataramanan 66287e408e07SAnirudh Venkataramanan if (test_bit(ICE_DOWN, pf->state)) 66290b28b702SAnirudh Venkataramanan goto clear_recovery; 66300b28b702SAnirudh Venkataramanan 6631462acf6aSTony Nguyen dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); 66320b28b702SAnirudh Venkataramanan 6633399e27dbSJacob Keller if (reset_type == ICE_RESET_EMPR) { 6634399e27dbSJacob Keller /* If an EMP reset has occurred, any previously pending flash 6635399e27dbSJacob Keller * update will have completed. We no longer know whether or 6636399e27dbSJacob Keller * not the NVM update EMP reset is restricted. 
6637399e27dbSJacob Keller */ 6638399e27dbSJacob Keller pf->fw_emp_reset_disabled = false; 6639399e27dbSJacob Keller } 6640399e27dbSJacob Keller 66412ccc1c1cSTony Nguyen err = ice_init_all_ctrlq(hw); 66422ccc1c1cSTony Nguyen if (err) { 66432ccc1c1cSTony Nguyen dev_err(dev, "control queues init failed %d\n", err); 66440f9d5027SAnirudh Venkataramanan goto err_init_ctrlq; 66450b28b702SAnirudh Venkataramanan } 66460b28b702SAnirudh Venkataramanan 6647462acf6aSTony Nguyen /* if DDP was previously loaded successfully */ 6648462acf6aSTony Nguyen if (!ice_is_safe_mode(pf)) { 6649462acf6aSTony Nguyen /* reload the SW DB of filter tables */ 6650462acf6aSTony Nguyen if (reset_type == ICE_RESET_PFR) 6651462acf6aSTony Nguyen ice_fill_blk_tbls(hw); 6652462acf6aSTony Nguyen else 6653462acf6aSTony Nguyen /* Reload DDP Package after CORER/GLOBR reset */ 6654462acf6aSTony Nguyen ice_load_pkg(NULL, pf); 6655462acf6aSTony Nguyen } 6656462acf6aSTony Nguyen 66572ccc1c1cSTony Nguyen err = ice_clear_pf_cfg(hw); 66582ccc1c1cSTony Nguyen if (err) { 66592ccc1c1cSTony Nguyen dev_err(dev, "clear PF configuration failed %d\n", err); 66600f9d5027SAnirudh Venkataramanan goto err_init_ctrlq; 66610b28b702SAnirudh Venkataramanan } 66620b28b702SAnirudh Venkataramanan 6663fc0f39bcSBrett Creeley if (pf->first_sw->dflt_vsi_ena) 666419cce2c6SAnirudh Venkataramanan dev_info(dev, "Clearing default VSI, re-enable after reset completes\n"); 6665fc0f39bcSBrett Creeley /* clear the default VSI configuration if it exists */ 6666fc0f39bcSBrett Creeley pf->first_sw->dflt_vsi = NULL; 6667fc0f39bcSBrett Creeley pf->first_sw->dflt_vsi_ena = false; 6668fc0f39bcSBrett Creeley 66690b28b702SAnirudh Venkataramanan ice_clear_pxe_mode(hw); 66700b28b702SAnirudh Venkataramanan 66712ccc1c1cSTony Nguyen err = ice_init_nvm(hw); 66722ccc1c1cSTony Nguyen if (err) { 66732ccc1c1cSTony Nguyen dev_err(dev, "ice_init_nvm failed %d\n", err); 667497a4ec01SJacob Keller goto err_init_ctrlq; 667597a4ec01SJacob Keller } 667697a4ec01SJacob 
Keller 66772ccc1c1cSTony Nguyen err = ice_get_caps(hw); 66782ccc1c1cSTony Nguyen if (err) { 66792ccc1c1cSTony Nguyen dev_err(dev, "ice_get_caps failed %d\n", err); 66800f9d5027SAnirudh Venkataramanan goto err_init_ctrlq; 66810b28b702SAnirudh Venkataramanan } 66820b28b702SAnirudh Venkataramanan 66832ccc1c1cSTony Nguyen err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); 66842ccc1c1cSTony Nguyen if (err) { 66852ccc1c1cSTony Nguyen dev_err(dev, "set_mac_cfg failed %d\n", err); 668642449105SAnirudh Venkataramanan goto err_init_ctrlq; 668742449105SAnirudh Venkataramanan } 668842449105SAnirudh Venkataramanan 66890f9d5027SAnirudh Venkataramanan err = ice_sched_init_port(hw->port_info); 66900f9d5027SAnirudh Venkataramanan if (err) 66910f9d5027SAnirudh Venkataramanan goto err_sched_init_port; 66920f9d5027SAnirudh Venkataramanan 66930b28b702SAnirudh Venkataramanan /* start misc vector */ 66940b28b702SAnirudh Venkataramanan err = ice_req_irq_msix_misc(pf); 66950b28b702SAnirudh Venkataramanan if (err) { 66960b28b702SAnirudh Venkataramanan dev_err(dev, "misc vector setup failed: %d\n", err); 6697462acf6aSTony Nguyen goto err_sched_init_port; 66980b28b702SAnirudh Venkataramanan } 66990b28b702SAnirudh Venkataramanan 670083af0039SHenry Tieman if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 670183af0039SHenry Tieman wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); 670283af0039SHenry Tieman if (!rd32(hw, PFQF_FD_SIZE)) { 670383af0039SHenry Tieman u16 unused, guar, b_effort; 670483af0039SHenry Tieman 670583af0039SHenry Tieman guar = hw->func_caps.fd_fltr_guar; 670683af0039SHenry Tieman b_effort = hw->func_caps.fd_fltr_best_effort; 670783af0039SHenry Tieman 670883af0039SHenry Tieman /* force guaranteed filter pool for PF */ 670983af0039SHenry Tieman ice_alloc_fd_guar_item(hw, &unused, guar); 671083af0039SHenry Tieman /* force shared filter pool for PF */ 671183af0039SHenry Tieman ice_alloc_fd_shrd_item(hw, &unused, b_effort); 671283af0039SHenry Tieman } 671383af0039SHenry 
Tieman } 671483af0039SHenry Tieman 6715462acf6aSTony Nguyen if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) 6716462acf6aSTony Nguyen ice_dcb_rebuild(pf); 6717462acf6aSTony Nguyen 671806c16d89SJacob Keller /* If the PF previously had enabled PTP, PTP init needs to happen before 671906c16d89SJacob Keller * the VSI rebuild. If not, this causes the PTP link status events to 672006c16d89SJacob Keller * fail. 672106c16d89SJacob Keller */ 672206c16d89SJacob Keller if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 672348096710SKarol Kolacinski ice_ptp_reset(pf); 672406c16d89SJacob Keller 6725462acf6aSTony Nguyen /* rebuild PF VSI */ 6726462acf6aSTony Nguyen err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); 67270f9d5027SAnirudh Venkataramanan if (err) { 6728462acf6aSTony Nguyen dev_err(dev, "PF VSI rebuild failed: %d\n", err); 67290f9d5027SAnirudh Venkataramanan goto err_vsi_rebuild; 67300f9d5027SAnirudh Venkataramanan } 67310b28b702SAnirudh Venkataramanan 673248096710SKarol Kolacinski /* configure PTP timestamping after VSI rebuild */ 673348096710SKarol Kolacinski if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 673448096710SKarol Kolacinski ice_ptp_cfg_timestamp(pf, false); 673548096710SKarol Kolacinski 6736b3be918dSGrzegorz Nitka err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); 6737b3be918dSGrzegorz Nitka if (err) { 6738b3be918dSGrzegorz Nitka dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); 6739b3be918dSGrzegorz Nitka goto err_vsi_rebuild; 6740b3be918dSGrzegorz Nitka } 6741b3be918dSGrzegorz Nitka 6742fbc7b27aSKiran Patil if (reset_type == ICE_RESET_PFR) { 6743fbc7b27aSKiran Patil err = ice_rebuild_channels(pf); 6744fbc7b27aSKiran Patil if (err) { 6745fbc7b27aSKiran Patil dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n", 6746fbc7b27aSKiran Patil err); 6747fbc7b27aSKiran Patil goto err_vsi_rebuild; 6748fbc7b27aSKiran Patil } 6749fbc7b27aSKiran Patil } 6750fbc7b27aSKiran Patil 675183af0039SHenry Tieman /* If Flow Director is active */ 
675283af0039SHenry Tieman if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 675383af0039SHenry Tieman err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); 675483af0039SHenry Tieman if (err) { 675583af0039SHenry Tieman dev_err(dev, "control VSI rebuild failed: %d\n", err); 675683af0039SHenry Tieman goto err_vsi_rebuild; 675783af0039SHenry Tieman } 675883af0039SHenry Tieman 675983af0039SHenry Tieman /* replay HW Flow Director recipes */ 676083af0039SHenry Tieman if (hw->fdir_prof) 676183af0039SHenry Tieman ice_fdir_replay_flows(hw); 676283af0039SHenry Tieman 676383af0039SHenry Tieman /* replay Flow Director filters */ 676483af0039SHenry Tieman ice_fdir_replay_fltrs(pf); 676528bf2672SBrett Creeley 676628bf2672SBrett Creeley ice_rebuild_arfs(pf); 676783af0039SHenry Tieman } 676883af0039SHenry Tieman 6769462acf6aSTony Nguyen ice_update_pf_netdev_link(pf); 6770462acf6aSTony Nguyen 6771462acf6aSTony Nguyen /* tell the firmware we are up */ 67722ccc1c1cSTony Nguyen err = ice_send_version(pf); 67732ccc1c1cSTony Nguyen if (err) { 67745f87ec48STony Nguyen dev_err(dev, "Rebuild failed due to error sending driver version: %d\n", 67752ccc1c1cSTony Nguyen err); 6776462acf6aSTony Nguyen goto err_vsi_rebuild; 6777ce317dd9SAnirudh Venkataramanan } 6778462acf6aSTony Nguyen 6779462acf6aSTony Nguyen ice_replay_post(hw); 6780ce317dd9SAnirudh Venkataramanan 67810f9d5027SAnirudh Venkataramanan /* if we get here, reset flow is successful */ 67827e408e07SAnirudh Venkataramanan clear_bit(ICE_RESET_FAILED, pf->state); 6783f9f5301eSDave Ertman 6784f9f5301eSDave Ertman ice_plug_aux_dev(pf); 67850b28b702SAnirudh Venkataramanan return; 67860b28b702SAnirudh Venkataramanan 67870f9d5027SAnirudh Venkataramanan err_vsi_rebuild: 67880f9d5027SAnirudh Venkataramanan err_sched_init_port: 67890f9d5027SAnirudh Venkataramanan ice_sched_cleanup_all(hw); 67900f9d5027SAnirudh Venkataramanan err_init_ctrlq: 67910b28b702SAnirudh Venkataramanan ice_shutdown_all_ctrlq(hw); 67927e408e07SAnirudh Venkataramanan 
set_bit(ICE_RESET_FAILED, pf->state); 67930b28b702SAnirudh Venkataramanan clear_recovery: 67940f9d5027SAnirudh Venkataramanan /* set this bit in PF state to control service task scheduling */ 67957e408e07SAnirudh Venkataramanan set_bit(ICE_NEEDS_RESTART, pf->state); 67960f9d5027SAnirudh Venkataramanan dev_err(dev, "Rebuild failed, unload and reload driver\n"); 67970b28b702SAnirudh Venkataramanan } 67980b28b702SAnirudh Venkataramanan 67990b28b702SAnirudh Venkataramanan /** 680023b44513SMaciej Fijalkowski * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP 680123b44513SMaciej Fijalkowski * @vsi: Pointer to VSI structure 680223b44513SMaciej Fijalkowski */ 680323b44513SMaciej Fijalkowski static int ice_max_xdp_frame_size(struct ice_vsi *vsi) 680423b44513SMaciej Fijalkowski { 680523b44513SMaciej Fijalkowski if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) 680623b44513SMaciej Fijalkowski return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; 680723b44513SMaciej Fijalkowski else 680823b44513SMaciej Fijalkowski return ICE_RXBUF_3072; 680923b44513SMaciej Fijalkowski } 681023b44513SMaciej Fijalkowski 681123b44513SMaciej Fijalkowski /** 6812e94d4478SAnirudh Venkataramanan * ice_change_mtu - NDO callback to change the MTU 6813e94d4478SAnirudh Venkataramanan * @netdev: network interface device structure 6814e94d4478SAnirudh Venkataramanan * @new_mtu: new value for maximum frame size 6815e94d4478SAnirudh Venkataramanan * 6816e94d4478SAnirudh Venkataramanan * Returns 0 on success, negative on failure 6817e94d4478SAnirudh Venkataramanan */ 6818e94d4478SAnirudh Venkataramanan static int ice_change_mtu(struct net_device *netdev, int new_mtu) 6819e94d4478SAnirudh Venkataramanan { 6820e94d4478SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 6821e94d4478SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 6822e94d4478SAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 6823348048e7SDave Ertman struct iidc_event *event; 
/**
 * ice_change_mtu - NDO callback to change the MTU
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int ice_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct iidc_event *event;
	u8 count = 0;
	int err = 0;

	/* nothing to do if the MTU is unchanged */
	if (new_mtu == (int)netdev->mtu) {
		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
		return 0;
	}

	/* with XDP loaded, the MTU is capped by the Rx buffer size */
	if (ice_is_xdp_ena_vsi(vsi)) {
		int frame_size = ice_max_xdp_frame_size(vsi);

		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
			netdev_err(netdev, "max MTU for XDP usage is %d\n",
				   frame_size - ICE_ETH_PKT_HDR_PAD);
			return -EINVAL;
		}
	}

	/* if a reset is in progress, wait for some time for it to complete */
	do {
		if (ice_is_reset_in_progress(pf->state)) {
			count++;
			usleep_range(1000, 2000);
		} else {
			break;
		}

	} while (count < 100);

	/* ~100-200ms elapsed and the reset is still running - give up */
	if (count == 100) {
		netdev_err(netdev, "can't change MTU. Device is busy\n");
		return -EBUSY;
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	/* notify auxiliary (RDMA) drivers before and after the MTU change */
	set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
	ice_send_event_to_aux(pf, event);
	clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);

	netdev->mtu = (unsigned int)new_mtu;

	/* if VSI is up, bring it down and then back up */
	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
		err = ice_down(vsi);
		if (err) {
			netdev_err(netdev, "change MTU if_down err %d\n", err);
			goto event_after;
		}

		err = ice_up(vsi);
		if (err) {
			netdev_err(netdev, "change MTU if_up err %d\n", err);
			goto event_after;
		}
	}

	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
	/* the AFTER event is sent even if down/up failed, so aux drivers
	 * always see a matching before/after pair
	 */
event_after:
	set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
	ice_send_event_to_aux(pf, event);
	kfree(event);

	return err;
}
Access the hwtstamp interface 689477a78115SJacob Keller * @netdev: network interface device structure 689577a78115SJacob Keller * @ifr: interface request data 689677a78115SJacob Keller * @cmd: ioctl command 689777a78115SJacob Keller */ 6898a7605370SArnd Bergmann static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 689977a78115SJacob Keller { 690077a78115SJacob Keller struct ice_netdev_priv *np = netdev_priv(netdev); 690177a78115SJacob Keller struct ice_pf *pf = np->vsi->back; 690277a78115SJacob Keller 690377a78115SJacob Keller switch (cmd) { 690477a78115SJacob Keller case SIOCGHWTSTAMP: 690577a78115SJacob Keller return ice_ptp_get_ts_config(pf, ifr); 690677a78115SJacob Keller case SIOCSHWTSTAMP: 690777a78115SJacob Keller return ice_ptp_set_ts_config(pf, ifr); 690877a78115SJacob Keller default: 690977a78115SJacob Keller return -EOPNOTSUPP; 691077a78115SJacob Keller } 691177a78115SJacob Keller } 691277a78115SJacob Keller 691377a78115SJacob Keller /** 69140fee3577SLihong Yang * ice_aq_str - convert AQ err code to a string 69150fee3577SLihong Yang * @aq_err: the AQ error code to convert 69160fee3577SLihong Yang */ 69170fee3577SLihong Yang const char *ice_aq_str(enum ice_aq_err aq_err) 69180fee3577SLihong Yang { 69190fee3577SLihong Yang switch (aq_err) { 69200fee3577SLihong Yang case ICE_AQ_RC_OK: 69210fee3577SLihong Yang return "OK"; 69220fee3577SLihong Yang case ICE_AQ_RC_EPERM: 69230fee3577SLihong Yang return "ICE_AQ_RC_EPERM"; 69240fee3577SLihong Yang case ICE_AQ_RC_ENOENT: 69250fee3577SLihong Yang return "ICE_AQ_RC_ENOENT"; 69260fee3577SLihong Yang case ICE_AQ_RC_ENOMEM: 69270fee3577SLihong Yang return "ICE_AQ_RC_ENOMEM"; 69280fee3577SLihong Yang case ICE_AQ_RC_EBUSY: 69290fee3577SLihong Yang return "ICE_AQ_RC_EBUSY"; 69300fee3577SLihong Yang case ICE_AQ_RC_EEXIST: 69310fee3577SLihong Yang return "ICE_AQ_RC_EEXIST"; 69320fee3577SLihong Yang case ICE_AQ_RC_EINVAL: 69330fee3577SLihong Yang return "ICE_AQ_RC_EINVAL"; 69340fee3577SLihong Yang 
case ICE_AQ_RC_ENOSPC: 69350fee3577SLihong Yang return "ICE_AQ_RC_ENOSPC"; 69360fee3577SLihong Yang case ICE_AQ_RC_ENOSYS: 69370fee3577SLihong Yang return "ICE_AQ_RC_ENOSYS"; 6938b5e19a64SChinh T Cao case ICE_AQ_RC_EMODE: 6939b5e19a64SChinh T Cao return "ICE_AQ_RC_EMODE"; 69400fee3577SLihong Yang case ICE_AQ_RC_ENOSEC: 69410fee3577SLihong Yang return "ICE_AQ_RC_ENOSEC"; 69420fee3577SLihong Yang case ICE_AQ_RC_EBADSIG: 69430fee3577SLihong Yang return "ICE_AQ_RC_EBADSIG"; 69440fee3577SLihong Yang case ICE_AQ_RC_ESVN: 69450fee3577SLihong Yang return "ICE_AQ_RC_ESVN"; 69460fee3577SLihong Yang case ICE_AQ_RC_EBADMAN: 69470fee3577SLihong Yang return "ICE_AQ_RC_EBADMAN"; 69480fee3577SLihong Yang case ICE_AQ_RC_EBADBUF: 69490fee3577SLihong Yang return "ICE_AQ_RC_EBADBUF"; 69500fee3577SLihong Yang } 69510fee3577SLihong Yang 69520fee3577SLihong Yang return "ICE_AQ_RC_UNKNOWN"; 69530fee3577SLihong Yang } 69540fee3577SLihong Yang 69550fee3577SLihong Yang /** 6956b66a972aSBrett Creeley * ice_set_rss_lut - Set RSS LUT 6957d76a60baSAnirudh Venkataramanan * @vsi: Pointer to VSI structure 6958d76a60baSAnirudh Venkataramanan * @lut: Lookup table 6959d76a60baSAnirudh Venkataramanan * @lut_size: Lookup table size 6960d76a60baSAnirudh Venkataramanan * 6961d76a60baSAnirudh Venkataramanan * Returns 0 on success, negative on failure 6962d76a60baSAnirudh Venkataramanan */ 6963b66a972aSBrett Creeley int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) 6964d76a60baSAnirudh Venkataramanan { 6965b66a972aSBrett Creeley struct ice_aq_get_set_rss_lut_params params = {}; 6966b66a972aSBrett Creeley struct ice_hw *hw = &vsi->back->hw; 69675e24d598STony Nguyen int status; 6968d76a60baSAnirudh Venkataramanan 6969b66a972aSBrett Creeley if (!lut) 6970b66a972aSBrett Creeley return -EINVAL; 6971d76a60baSAnirudh Venkataramanan 6972b66a972aSBrett Creeley params.vsi_handle = vsi->idx; 6973b66a972aSBrett Creeley params.lut_size = lut_size; 6974b66a972aSBrett Creeley params.lut_type = 
vsi->rss_lut_type; 6975b66a972aSBrett Creeley params.lut = lut; 6976d76a60baSAnirudh Venkataramanan 6977b66a972aSBrett Creeley status = ice_aq_set_rss_lut(hw, ¶ms); 6978c1484691STony Nguyen if (status) 69795f87ec48STony Nguyen dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n", 69805518ac2aSTony Nguyen status, ice_aq_str(hw->adminq.sq_last_status)); 6981d76a60baSAnirudh Venkataramanan 6982c1484691STony Nguyen return status; 6983d76a60baSAnirudh Venkataramanan } 6984d76a60baSAnirudh Venkataramanan 6985d76a60baSAnirudh Venkataramanan /** 6986b66a972aSBrett Creeley * ice_set_rss_key - Set RSS key 6987b66a972aSBrett Creeley * @vsi: Pointer to the VSI structure 6988b66a972aSBrett Creeley * @seed: RSS hash seed 6989b66a972aSBrett Creeley * 6990b66a972aSBrett Creeley * Returns 0 on success, negative on failure 6991b66a972aSBrett Creeley */ 6992b66a972aSBrett Creeley int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) 6993b66a972aSBrett Creeley { 6994b66a972aSBrett Creeley struct ice_hw *hw = &vsi->back->hw; 69955e24d598STony Nguyen int status; 6996b66a972aSBrett Creeley 6997b66a972aSBrett Creeley if (!seed) 6998b66a972aSBrett Creeley return -EINVAL; 6999b66a972aSBrett Creeley 7000b66a972aSBrett Creeley status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); 7001c1484691STony Nguyen if (status) 70025f87ec48STony Nguyen dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n", 70035518ac2aSTony Nguyen status, ice_aq_str(hw->adminq.sq_last_status)); 7004b66a972aSBrett Creeley 7005c1484691STony Nguyen return status; 7006b66a972aSBrett Creeley } 7007b66a972aSBrett Creeley 7008b66a972aSBrett Creeley /** 7009b66a972aSBrett Creeley * ice_get_rss_lut - Get RSS LUT 7010d76a60baSAnirudh Venkataramanan * @vsi: Pointer to VSI structure 7011d76a60baSAnirudh Venkataramanan * @lut: Buffer to store the lookup table entries 7012d76a60baSAnirudh Venkataramanan * @lut_size: Size of buffer to store the lookup table entries 
7013d76a60baSAnirudh Venkataramanan * 7014d76a60baSAnirudh Venkataramanan * Returns 0 on success, negative on failure 7015d76a60baSAnirudh Venkataramanan */ 7016b66a972aSBrett Creeley int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) 7017d76a60baSAnirudh Venkataramanan { 7018b66a972aSBrett Creeley struct ice_aq_get_set_rss_lut_params params = {}; 7019b66a972aSBrett Creeley struct ice_hw *hw = &vsi->back->hw; 70205e24d598STony Nguyen int status; 7021d76a60baSAnirudh Venkataramanan 7022b66a972aSBrett Creeley if (!lut) 7023b66a972aSBrett Creeley return -EINVAL; 7024d76a60baSAnirudh Venkataramanan 7025b66a972aSBrett Creeley params.vsi_handle = vsi->idx; 7026b66a972aSBrett Creeley params.lut_size = lut_size; 7027b66a972aSBrett Creeley params.lut_type = vsi->rss_lut_type; 7028b66a972aSBrett Creeley params.lut = lut; 7029b66a972aSBrett Creeley 7030b66a972aSBrett Creeley status = ice_aq_get_rss_lut(hw, ¶ms); 7031c1484691STony Nguyen if (status) 70325f87ec48STony Nguyen dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n", 70335518ac2aSTony Nguyen status, ice_aq_str(hw->adminq.sq_last_status)); 7034b66a972aSBrett Creeley 7035c1484691STony Nguyen return status; 7036d76a60baSAnirudh Venkataramanan } 7037d76a60baSAnirudh Venkataramanan 7038b66a972aSBrett Creeley /** 7039b66a972aSBrett Creeley * ice_get_rss_key - Get RSS key 7040b66a972aSBrett Creeley * @vsi: Pointer to VSI structure 7041b66a972aSBrett Creeley * @seed: Buffer to store the key in 7042b66a972aSBrett Creeley * 7043b66a972aSBrett Creeley * Returns 0 on success, negative on failure 7044b66a972aSBrett Creeley */ 7045b66a972aSBrett Creeley int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) 7046b66a972aSBrett Creeley { 7047b66a972aSBrett Creeley struct ice_hw *hw = &vsi->back->hw; 70485e24d598STony Nguyen int status; 7049e3c53928SBrett Creeley 7050b66a972aSBrett Creeley if (!seed) 7051b66a972aSBrett Creeley return -EINVAL; 7052b66a972aSBrett Creeley 7053b66a972aSBrett Creeley status 
= ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); 7054c1484691STony Nguyen if (status) 70555f87ec48STony Nguyen dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n", 70565518ac2aSTony Nguyen status, ice_aq_str(hw->adminq.sq_last_status)); 7057d76a60baSAnirudh Venkataramanan 7058c1484691STony Nguyen return status; 7059d76a60baSAnirudh Venkataramanan } 7060d76a60baSAnirudh Venkataramanan 7061d76a60baSAnirudh Venkataramanan /** 7062b1edc14aSMd Fahad Iqbal Polash * ice_bridge_getlink - Get the hardware bridge mode 7063b1edc14aSMd Fahad Iqbal Polash * @skb: skb buff 7064f9867df6SAnirudh Venkataramanan * @pid: process ID 7065b1edc14aSMd Fahad Iqbal Polash * @seq: RTNL message seq 7066b1edc14aSMd Fahad Iqbal Polash * @dev: the netdev being configured 7067b1edc14aSMd Fahad Iqbal Polash * @filter_mask: filter mask passed in 7068b1edc14aSMd Fahad Iqbal Polash * @nlflags: netlink flags passed in 7069b1edc14aSMd Fahad Iqbal Polash * 7070b1edc14aSMd Fahad Iqbal Polash * Return the bridge mode (VEB/VEPA) 7071b1edc14aSMd Fahad Iqbal Polash */ 7072b1edc14aSMd Fahad Iqbal Polash static int 7073b1edc14aSMd Fahad Iqbal Polash ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 7074b1edc14aSMd Fahad Iqbal Polash struct net_device *dev, u32 filter_mask, int nlflags) 7075b1edc14aSMd Fahad Iqbal Polash { 7076b1edc14aSMd Fahad Iqbal Polash struct ice_netdev_priv *np = netdev_priv(dev); 7077b1edc14aSMd Fahad Iqbal Polash struct ice_vsi *vsi = np->vsi; 7078b1edc14aSMd Fahad Iqbal Polash struct ice_pf *pf = vsi->back; 7079b1edc14aSMd Fahad Iqbal Polash u16 bmode; 7080b1edc14aSMd Fahad Iqbal Polash 7081b1edc14aSMd Fahad Iqbal Polash bmode = pf->first_sw->bridge_mode; 7082b1edc14aSMd Fahad Iqbal Polash 7083b1edc14aSMd Fahad Iqbal Polash return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, 7084b1edc14aSMd Fahad Iqbal Polash filter_mask, NULL); 7085b1edc14aSMd Fahad Iqbal Polash } 7086b1edc14aSMd Fahad Iqbal Polash 
7087b1edc14aSMd Fahad Iqbal Polash /** 7088b1edc14aSMd Fahad Iqbal Polash * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) 7089b1edc14aSMd Fahad Iqbal Polash * @vsi: Pointer to VSI structure 7090b1edc14aSMd Fahad Iqbal Polash * @bmode: Hardware bridge mode (VEB/VEPA) 7091b1edc14aSMd Fahad Iqbal Polash * 7092b1edc14aSMd Fahad Iqbal Polash * Returns 0 on success, negative on failure 7093b1edc14aSMd Fahad Iqbal Polash */ 7094b1edc14aSMd Fahad Iqbal Polash static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) 7095b1edc14aSMd Fahad Iqbal Polash { 7096b1edc14aSMd Fahad Iqbal Polash struct ice_aqc_vsi_props *vsi_props; 7097b1edc14aSMd Fahad Iqbal Polash struct ice_hw *hw = &vsi->back->hw; 7098198a666aSBruce Allan struct ice_vsi_ctx *ctxt; 70992ccc1c1cSTony Nguyen int ret; 7100b1edc14aSMd Fahad Iqbal Polash 7101b1edc14aSMd Fahad Iqbal Polash vsi_props = &vsi->info; 7102198a666aSBruce Allan 71039efe35d0STony Nguyen ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 7104198a666aSBruce Allan if (!ctxt) 7105198a666aSBruce Allan return -ENOMEM; 7106198a666aSBruce Allan 7107198a666aSBruce Allan ctxt->info = vsi->info; 7108b1edc14aSMd Fahad Iqbal Polash 7109b1edc14aSMd Fahad Iqbal Polash if (bmode == BRIDGE_MODE_VEB) 7110b1edc14aSMd Fahad Iqbal Polash /* change from VEPA to VEB mode */ 7111198a666aSBruce Allan ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 7112b1edc14aSMd Fahad Iqbal Polash else 7113b1edc14aSMd Fahad Iqbal Polash /* change from VEB to VEPA mode */ 7114198a666aSBruce Allan ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 7115198a666aSBruce Allan ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); 71165726ca0eSAnirudh Venkataramanan 71172ccc1c1cSTony Nguyen ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 71182ccc1c1cSTony Nguyen if (ret) { 71195f87ec48STony Nguyen dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n", 71202ccc1c1cSTony Nguyen bmode, ret, 
ice_aq_str(hw->adminq.sq_last_status)); 7121198a666aSBruce Allan goto out; 7122b1edc14aSMd Fahad Iqbal Polash } 7123b1edc14aSMd Fahad Iqbal Polash /* Update sw flags for book keeping */ 7124198a666aSBruce Allan vsi_props->sw_flags = ctxt->info.sw_flags; 7125b1edc14aSMd Fahad Iqbal Polash 7126198a666aSBruce Allan out: 71279efe35d0STony Nguyen kfree(ctxt); 7128198a666aSBruce Allan return ret; 7129b1edc14aSMd Fahad Iqbal Polash } 7130b1edc14aSMd Fahad Iqbal Polash 7131b1edc14aSMd Fahad Iqbal Polash /** 7132b1edc14aSMd Fahad Iqbal Polash * ice_bridge_setlink - Set the hardware bridge mode 7133b1edc14aSMd Fahad Iqbal Polash * @dev: the netdev being configured 7134b1edc14aSMd Fahad Iqbal Polash * @nlh: RTNL message 7135b1edc14aSMd Fahad Iqbal Polash * @flags: bridge setlink flags 71362fd527b7SPetr Machata * @extack: netlink extended ack 7137b1edc14aSMd Fahad Iqbal Polash * 7138b1edc14aSMd Fahad Iqbal Polash * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is 7139b1edc14aSMd Fahad Iqbal Polash * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if 7140b1edc14aSMd Fahad Iqbal Polash * not already set for all VSIs connected to this switch. And also update the 7141b1edc14aSMd Fahad Iqbal Polash * unicast switch filter rules for the corresponding switch of the netdev. 
7142b1edc14aSMd Fahad Iqbal Polash */ 7143b1edc14aSMd Fahad Iqbal Polash static int 7144b1edc14aSMd Fahad Iqbal Polash ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 71453d505147SBruce Allan u16 __always_unused flags, 71463d505147SBruce Allan struct netlink_ext_ack __always_unused *extack) 7147b1edc14aSMd Fahad Iqbal Polash { 7148b1edc14aSMd Fahad Iqbal Polash struct ice_netdev_priv *np = netdev_priv(dev); 7149b1edc14aSMd Fahad Iqbal Polash struct ice_pf *pf = np->vsi->back; 7150b1edc14aSMd Fahad Iqbal Polash struct nlattr *attr, *br_spec; 7151b1edc14aSMd Fahad Iqbal Polash struct ice_hw *hw = &pf->hw; 7152b1edc14aSMd Fahad Iqbal Polash struct ice_sw *pf_sw; 7153b1edc14aSMd Fahad Iqbal Polash int rem, v, err = 0; 7154b1edc14aSMd Fahad Iqbal Polash 7155b1edc14aSMd Fahad Iqbal Polash pf_sw = pf->first_sw; 7156b1edc14aSMd Fahad Iqbal Polash /* find the attribute in the netlink message */ 7157b1edc14aSMd Fahad Iqbal Polash br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 7158b1edc14aSMd Fahad Iqbal Polash 7159b1edc14aSMd Fahad Iqbal Polash nla_for_each_nested(attr, br_spec, rem) { 7160b1edc14aSMd Fahad Iqbal Polash __u16 mode; 7161b1edc14aSMd Fahad Iqbal Polash 7162b1edc14aSMd Fahad Iqbal Polash if (nla_type(attr) != IFLA_BRIDGE_MODE) 7163b1edc14aSMd Fahad Iqbal Polash continue; 7164b1edc14aSMd Fahad Iqbal Polash mode = nla_get_u16(attr); 7165b1edc14aSMd Fahad Iqbal Polash if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) 7166b1edc14aSMd Fahad Iqbal Polash return -EINVAL; 7167b1edc14aSMd Fahad Iqbal Polash /* Continue if bridge mode is not being flipped */ 7168b1edc14aSMd Fahad Iqbal Polash if (mode == pf_sw->bridge_mode) 7169b1edc14aSMd Fahad Iqbal Polash continue; 7170b1edc14aSMd Fahad Iqbal Polash /* Iterates through the PF VSI list and update the loopback 7171b1edc14aSMd Fahad Iqbal Polash * mode of the VSI 7172b1edc14aSMd Fahad Iqbal Polash */ 7173b1edc14aSMd Fahad Iqbal Polash ice_for_each_vsi(pf, v) { 
7174b1edc14aSMd Fahad Iqbal Polash if (!pf->vsi[v]) 7175b1edc14aSMd Fahad Iqbal Polash continue; 7176b1edc14aSMd Fahad Iqbal Polash err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); 7177b1edc14aSMd Fahad Iqbal Polash if (err) 7178b1edc14aSMd Fahad Iqbal Polash return err; 7179b1edc14aSMd Fahad Iqbal Polash } 7180b1edc14aSMd Fahad Iqbal Polash 7181b1edc14aSMd Fahad Iqbal Polash hw->evb_veb = (mode == BRIDGE_MODE_VEB); 7182b1edc14aSMd Fahad Iqbal Polash /* Update the unicast switch filter rules for the corresponding 7183b1edc14aSMd Fahad Iqbal Polash * switch of the netdev 7184b1edc14aSMd Fahad Iqbal Polash */ 71852ccc1c1cSTony Nguyen err = ice_update_sw_rule_bridge_mode(hw); 71862ccc1c1cSTony Nguyen if (err) { 71875f87ec48STony Nguyen netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n", 71882ccc1c1cSTony Nguyen mode, err, 71890fee3577SLihong Yang ice_aq_str(hw->adminq.sq_last_status)); 7190b1edc14aSMd Fahad Iqbal Polash /* revert hw->evb_veb */ 7191b1edc14aSMd Fahad Iqbal Polash hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); 7192c1484691STony Nguyen return err; 7193b1edc14aSMd Fahad Iqbal Polash } 7194b1edc14aSMd Fahad Iqbal Polash 7195b1edc14aSMd Fahad Iqbal Polash pf_sw->bridge_mode = mode; 7196b1edc14aSMd Fahad Iqbal Polash } 7197b1edc14aSMd Fahad Iqbal Polash 7198b1edc14aSMd Fahad Iqbal Polash return 0; 7199b1edc14aSMd Fahad Iqbal Polash } 7200b1edc14aSMd Fahad Iqbal Polash 7201b1edc14aSMd Fahad Iqbal Polash /** 7202b3969fd7SSudheer Mogilappagari * ice_tx_timeout - Respond to a Tx Hang 7203b3969fd7SSudheer Mogilappagari * @netdev: network interface device structure 7204644f40eaSBruce Allan * @txqueue: Tx queue 7205b3969fd7SSudheer Mogilappagari */ 72060290bd29SMichael S. 
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_tx_ring *tx_ring = NULL;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u32 i;

	pf->tx_timeout_count++;

	/* Check if PFC is enabled for the TC to which the queue belongs
	 * to. If yes then Tx timeout is not caused by a hung queue, no
	 * need to reset and rebuild
	 */
	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
			 txqueue);
		return;
	}

	/* now that we have an index, find the tx_ring struct */
	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			if (txqueue == vsi->tx_rings[i]->q_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}

	/* Reset recovery level if enough time has elapsed after last timeout.
	 * Also ensure no new reset action happens before next timeout period.
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
		pf->tx_timeout_recovery_level = 1;
	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
				       netdev->watchdog_timeo)))
		return;

	if (tx_ring) {
		struct ice_hw *hw = &pf->hw;
		u32 head, val = 0;

		/* read the HW head pointer of the hung queue for diagnostics */
		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
		/* Read interrupt register */
		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));

		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use, val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
		    pf->tx_timeout_recovery_level, txqueue);

	/* escalate: PF reset -> core reset -> global reset; beyond that the
	 * device is declared unrecoverable and taken down
	 */
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(ICE_PFR_REQ, pf->state);
		break;
	case 2:
		set_bit(ICE_CORER_REQ, pf->state);
		break;
	case 3:
		set_bit(ICE_GLOBR_REQ, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
		set_bit(ICE_DOWN, pf->state);
		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
		set_bit(ICE_SERVICE_DIS, pf->state);
		break;
	}

	/* the service task performs the actual reset requested above */
	ice_service_task_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * ice_setup_tc_cls_flower - flower classifier offloads
 * @np: net device to configure
 * @filter_dev: device on which filter is added
 * @cls_flower: offload data
 */
static int
ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
			struct net_device *filter_dev,
			struct flow_cls_offload *cls_flower)
{
	struct ice_vsi *vsi = np->vsi;

	/* only chain 0 is supported for HW offload */
	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(vsi, cls_flower);
	default:
		return -EINVAL;
	}
}

/**
 * ice_setup_tc_block_cb - callback handler registered for TC block
 * @type: TC SETUP type
 * @type_data: TC flower offload data that contains user input
 * @cb_priv: netdev private data
 */
static int
ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct ice_netdev_priv *np = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * ice_validate_mqprio_qopt - Validate TCF input parameters
 * @vsi: Pointer to VSI
 * @mqprio_qopt: input parameters for mqprio queue configuration
 *
 * This function validates MQPRIO params, such as qcount (power of 2 wherever
 * needed), and make sure user doesn't specify qcount and BW rate limit
 * for TCs, which are more than "num_tc"
 */
static int
ice_validate_mqprio_qopt(struct ice_vsi *vsi,
			 struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 sum_max_rate = 0, sum_min_rate = 0;
	int non_power_of_2_qcount = 0;
	struct ice_pf *pf = vsi->back;
	int max_rss_q_cnt = 0;
	struct device *dev;
	int i, speed;
	u8 num_tc;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (mqprio_qopt->qopt.offset[0] != 0 ||
	    mqprio_qopt->qopt.num_tc < 1 ||
	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
		return -EINVAL;

	dev = ice_pf_to_dev(pf);
	vsi->ch_rss_size = 0;
	num_tc = mqprio_qopt->qopt.num_tc;

	/* loop exits via the "i >= num_tc - 1" break below; num_tc >= 1 is
	 * guaranteed by the range check above
	 */
	for (i = 0; num_tc; i++) {
		int qcount = mqprio_qopt->qopt.count[i];
		u64 max_rate, min_rate, rem;

		if (!qcount)
			return -EINVAL;

		/* at most one TC may have a non-power-of-2 qcount, and it
		 * must be the largest; RSS sizing depends on this
		 */
		if (is_power_of_2(qcount)) {
			if (non_power_of_2_qcount &&
			    qcount > non_power_of_2_qcount) {
				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
					qcount, non_power_of_2_qcount);
				return -EINVAL;
			}
			if (qcount > max_rss_q_cnt)
				max_rss_q_cnt = qcount;
		} else {
			if (non_power_of_2_qcount &&
			    qcount != non_power_of_2_qcount) {
				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
					qcount, non_power_of_2_qcount);
				return -EINVAL;
			}
			if (qcount < max_rss_q_cnt) {
				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
					qcount, max_rss_q_cnt);
				return -EINVAL;
			}
			max_rss_q_cnt = qcount;
			non_power_of_2_qcount = qcount;
		}

		/* TC command takes input in K/N/Gbps or K/M/Gbit etc but
		 * converts the bandwidth rate limit into Bytes/s when
		 * passing it down to the driver. So convert input bandwidth
		 * from Bytes/s to Kbps
		 */
		max_rate = mqprio_qopt->max_rate[i];
		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
		sum_max_rate += max_rate;

		/* min_rate is minimum guaranteed rate and it can't be zero */
		min_rate = mqprio_qopt->min_rate[i];
		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
		sum_min_rate += min_rate;

		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
				min_rate, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		/* rates must be whole multiples of the minimum BW granularity */
		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
		if (rem) {
			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
				i, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
		if (rem) {
			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
				i, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		/* min_rate can't be more than max_rate, except when max_rate
		 * is zero (implies max_rate sought is max line rate). In such
		 * a case min_rate can be more than max.
		 */
		if (max_rate && min_rate > max_rate) {
			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
				min_rate, max_rate);
			return -EINVAL;
		}

		if (i >= mqprio_qopt->qopt.num_tc - 1)
			break;
		/* TC queue ranges must be contiguous */
		if (mqprio_qopt->qopt.offset[i + 1] !=
		    (mqprio_qopt->qopt.offset[i] + qcount))
			return -EINVAL;
	}
	/* after the break, i indexes the last TC; its range must fit within
	 * the VSI's allocated Rx and Tx queues
	 */
	if (vsi->num_rxq <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
		return -EINVAL;
	if (vsi->num_txq <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
		return -EINVAL;

	/* NOTE(review): ice_get_link_speed_kbps() returns int; a negative
	 * error value would wrap to a huge u64 here and pass the checks —
	 * confirm the helper cannot fail in this path
	 */
	speed = ice_get_link_speed_kbps(vsi);
	if (sum_max_rate && sum_max_rate > (u64)speed) {
		dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
			sum_max_rate, speed);
		return -EINVAL;
	}
	if (sum_min_rate && sum_min_rate > (u64)speed) {
		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
			sum_min_rate, speed);
		return -EINVAL;
	}

	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
	vsi->ch_rss_size = max_rss_q_cnt;

	return 0;
}

/**
 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
 * @pf: ptr to PF device
 * @vsi: ptr to VSI
 *
 * Returns 0 when the VSI was processed (even if no group accepted it —
 * failures per flow are logged and skipped), -EINVAL when the VSI has no
 * guaranteed or best-effort filter quota.
 */
static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(pf);
	bool added = false;
	struct ice_hw *hw;
	int flow;

	if (!(vsi->num_gfltr || vsi->num_bfltr))
		return -EINVAL;

	hw = &pf->hw;
	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		struct ice_fd_hw_prof *prof;
		int tun, status;
		u64 entry_h;

		/* skip flow types with no FDir profile configured */
		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
		      hw->fdir_prof[flow]->cnt))
			continue;

		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			enum ice_flow_priority prio;
			u64 prof_id;

			/* add this VSI to FDir profile for this flow */
			prio = ICE_FLOW_PRIO_NORMAL;
			prof = hw->fdir_prof[flow];
			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
			status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
						    prof->vsi_h[0], vsi->idx,
						    prio, prof->fdir_seg[tun],
						    &entry_h);
			if (status) {
				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
					vsi->idx, flow);
				continue;
			}

			prof->entry_h[prof->cnt][tun] = entry_h;
		}

		/* store VSI for filter replay and delete */
		prof->vsi_h[prof->cnt] = vsi->idx;
		prof->cnt++;

		added = true;
		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
			flow);
	}

	if (!added)
		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);

	return 0;
}

/**
 * ice_add_channel - add a channel by adding VSI
 * @pf: ptr to PF device
 * @sw_id: underlying HW switching element ID
 * @ch: ptr to channel structure
 *
 * Add a channel (VSI) using add_vsi and queue_map
 */
static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi;

	if (ch->type != ICE_VSI_CHNL) {
		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
		return -EINVAL;
	}

	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
	if (!vsi || vsi->type != ICE_VSI_CHNL) {
		dev_err(dev, "create chnl VSI failure\n");
		return -EINVAL;
	}

	/* best-effort; failure to join FDir groups is logged, not fatal */
	ice_add_vsi_to_fdir(pf, vsi);

	ch->sw_id = sw_id;
	ch->vsi_num = vsi->vsi_num;
	ch->info.mapping_flags = vsi->info.mapping_flags;
	ch->ch_vsi = vsi;
	/* set the back pointer of channel for newly created VSI */
	vsi->ch = ch;

	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
	       sizeof(vsi->info.q_mapping));
	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));

	return 0;
}

/**
 * ice_chnl_cfg_res
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Configure channel specific resources such as rings, vector.
7577fbc7b27aSKiran Patil */ 7578fbc7b27aSKiran Patil static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch) 7579fbc7b27aSKiran Patil { 7580fbc7b27aSKiran Patil int i; 7581fbc7b27aSKiran Patil 7582fbc7b27aSKiran Patil for (i = 0; i < ch->num_txq; i++) { 7583fbc7b27aSKiran Patil struct ice_q_vector *tx_q_vector, *rx_q_vector; 7584fbc7b27aSKiran Patil struct ice_ring_container *rc; 7585fbc7b27aSKiran Patil struct ice_tx_ring *tx_ring; 7586fbc7b27aSKiran Patil struct ice_rx_ring *rx_ring; 7587fbc7b27aSKiran Patil 7588fbc7b27aSKiran Patil tx_ring = vsi->tx_rings[ch->base_q + i]; 7589fbc7b27aSKiran Patil rx_ring = vsi->rx_rings[ch->base_q + i]; 7590fbc7b27aSKiran Patil if (!tx_ring || !rx_ring) 7591fbc7b27aSKiran Patil continue; 7592fbc7b27aSKiran Patil 7593fbc7b27aSKiran Patil /* setup ring being channel enabled */ 7594fbc7b27aSKiran Patil tx_ring->ch = ch; 7595fbc7b27aSKiran Patil rx_ring->ch = ch; 7596fbc7b27aSKiran Patil 7597fbc7b27aSKiran Patil /* following code block sets up vector specific attributes */ 7598fbc7b27aSKiran Patil tx_q_vector = tx_ring->q_vector; 7599fbc7b27aSKiran Patil rx_q_vector = rx_ring->q_vector; 7600fbc7b27aSKiran Patil if (!tx_q_vector && !rx_q_vector) 7601fbc7b27aSKiran Patil continue; 7602fbc7b27aSKiran Patil 7603fbc7b27aSKiran Patil if (tx_q_vector) { 7604fbc7b27aSKiran Patil tx_q_vector->ch = ch; 7605fbc7b27aSKiran Patil /* setup Tx and Rx ITR setting if DIM is off */ 7606fbc7b27aSKiran Patil rc = &tx_q_vector->tx; 7607fbc7b27aSKiran Patil if (!ITR_IS_DYNAMIC(rc)) 7608fbc7b27aSKiran Patil ice_write_itr(rc, rc->itr_setting); 7609fbc7b27aSKiran Patil } 7610fbc7b27aSKiran Patil if (rx_q_vector) { 7611fbc7b27aSKiran Patil rx_q_vector->ch = ch; 7612fbc7b27aSKiran Patil /* setup Tx and Rx ITR setting if DIM is off */ 7613fbc7b27aSKiran Patil rc = &rx_q_vector->rx; 7614fbc7b27aSKiran Patil if (!ITR_IS_DYNAMIC(rc)) 7615fbc7b27aSKiran Patil ice_write_itr(rc, rc->itr_setting); 7616fbc7b27aSKiran Patil } 7617fbc7b27aSKiran Patil 
} 7618fbc7b27aSKiran Patil 7619fbc7b27aSKiran Patil /* it is safe to assume that, if channel has non-zero num_t[r]xq, then 7620fbc7b27aSKiran Patil * GLINT_ITR register would have written to perform in-context 7621fbc7b27aSKiran Patil * update, hence perform flush 7622fbc7b27aSKiran Patil */ 7623fbc7b27aSKiran Patil if (ch->num_txq || ch->num_rxq) 7624fbc7b27aSKiran Patil ice_flush(&vsi->back->hw); 7625fbc7b27aSKiran Patil } 7626fbc7b27aSKiran Patil 7627fbc7b27aSKiran Patil /** 7628fbc7b27aSKiran Patil * ice_cfg_chnl_all_res - configure channel resources 7629fbc7b27aSKiran Patil * @vsi: pte to main_vsi 7630fbc7b27aSKiran Patil * @ch: ptr to channel structure 7631fbc7b27aSKiran Patil * 7632fbc7b27aSKiran Patil * This function configures channel specific resources such as flow-director 7633fbc7b27aSKiran Patil * counter index, and other resources such as queues, vectors, ITR settings 7634fbc7b27aSKiran Patil */ 7635fbc7b27aSKiran Patil static void 7636fbc7b27aSKiran Patil ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch) 7637fbc7b27aSKiran Patil { 7638fbc7b27aSKiran Patil /* configure channel (aka ADQ) resources such as queues, vectors, 7639fbc7b27aSKiran Patil * ITR settings for channel specific vectors and anything else 7640fbc7b27aSKiran Patil */ 7641fbc7b27aSKiran Patil ice_chnl_cfg_res(vsi, ch); 7642fbc7b27aSKiran Patil } 7643fbc7b27aSKiran Patil 7644fbc7b27aSKiran Patil /** 7645fbc7b27aSKiran Patil * ice_setup_hw_channel - setup new channel 7646fbc7b27aSKiran Patil * @pf: ptr to PF device 7647fbc7b27aSKiran Patil * @vsi: the VSI being setup 7648fbc7b27aSKiran Patil * @ch: ptr to channel structure 7649fbc7b27aSKiran Patil * @sw_id: underlying HW switching element ID 7650fbc7b27aSKiran Patil * @type: type of channel to be created (VMDq2/VF) 7651fbc7b27aSKiran Patil * 7652fbc7b27aSKiran Patil * Setup new channel (VSI) based on specified type (VMDq2/VF) 7653fbc7b27aSKiran Patil * and configures Tx rings accordingly 7654fbc7b27aSKiran Patil */ 
static int
ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_channel *ch, u16 sw_id, u8 type)
{
	struct device *dev = ice_pf_to_dev(pf);
	int ret;

	/* carve the channel's queue range out of the parent VSI */
	ch->base_q = vsi->next_base_q;
	ch->type = type;

	ret = ice_add_channel(pf, sw_id, ch);
	if (ret) {
		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
		return ret;
	}

	/* configure/setup ADQ specific resources */
	ice_cfg_chnl_all_res(vsi, ch);

	/* make sure to update the next_base_q so that subsequent channel's
	 * (aka ADQ) VSI queue map is correct
	 *
	 * NOTE(review): advances by num_rxq only; this assumes num_txq ==
	 * num_rxq for every channel (true for mqprio-created channels) —
	 * confirm if other callers are added
	 */
	vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
		ch->num_rxq);

	return 0;
}

/**
 * ice_setup_channel - setup new channel using uplink element
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and uplink switching element
 *
 * Returns true when the channel VSI was created, false otherwise.
 */
static bool
ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
		  struct ice_channel *ch)
{
	struct device *dev = ice_pf_to_dev(pf);
	u16 sw_id;
	int ret;

	/* channels can only hang off the main PF VSI */
	if (vsi->type != ICE_VSI_PF) {
		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
		return false;
	}

	sw_id = pf->first_sw->sw_id;

	/* create channel (VSI) */
	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
	if (ret) {
		dev_err(dev, "failed to setup hw_channel\n");
		return false;
	}
	dev_dbg(dev, "successfully created channel()\n");

	return ch->ch_vsi ? true : false;
}

/**
 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
 * @vsi: VSI to be configured
 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
 *
 * Returns 0 on success, otherwise the error from the first limit that
 * failed to apply (min is applied before max).
 */
static int
ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
{
	int err;

	err = ice_set_min_bw_limit(vsi, min_tx_rate);
	if (err)
		return err;

	return ice_set_max_bw_limit(vsi, max_tx_rate);
}

/**
 * ice_create_q_channel - function to create channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (it contains channel specific params)
 *
 * This function creates channel (VSI) using num_queues specified by user,
 * reconfigs RSS if needed.
 *
 * Returns 0 on success, -EINVAL on invalid parameters or setup failure.
 */
static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	if (!ch)
		return -EINVAL;

	dev = ice_pf_to_dev(pf);
	if (!ch->num_txq || !ch->num_rxq) {
		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
		return -EINVAL;
	}

	/* reject the request if the parent VSI cannot donate enough queues */
	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
			vsi->cnt_q_avail, ch->num_txq);
		return -EINVAL;
	}

	if (!ice_setup_channel(pf, vsi, ch)) {
		dev_info(dev, "Failed to setup channel\n");
		return -EINVAL;
	}
	/* configure BW rate limit */
	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
		int ret;

		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
				       ch->min_tx_rate);
		if (ret)
			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->ch_vsi->vsi_num);
		else
			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->ch_vsi->vsi_num);
	}

	/* queues are now owned by the channel */
	vsi->cnt_q_avail -= ch->num_txq;

	return 0;
}

/**
 * ice_rem_all_chnl_fltrs - removes all channel filters
 * @pf: ptr to PF, TC-flower based filter are tracked at PF level
 *
 * Remove all advanced switch filters only if they are channel specific
 * tc-flower based filter
 */
static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
{
	struct ice_tc_flower_fltr *fltr;
	struct hlist_node *node;

	/* to remove all channel filters, iterate an ordered list of filters */
	hlist_for_each_entry_safe(fltr, node,
				  &pf->tc_flower_fltr_list,
				  tc_flower_node) {
		struct ice_rule_query_data rule;
		int status;

		/* for now process only channel specific filters */
		if (!ice_is_chnl_fltr(fltr))
			continue;

		rule.rid = fltr->rid;
		rule.rule_id = fltr->rule_id;
		rule.vsi_handle = fltr->dest_id;
		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
		if (status) {
			if (status == -ENOENT)
				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
					rule.rule_id);
			else
				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
					status);
		} else if (fltr->dest_vsi) {
			/* update advanced switch filter count */
			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
				u32 flags = fltr->flags;

				fltr->dest_vsi->num_chnl_fltr--;
				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
					pf->num_dmac_chnl_fltrs--;
			}
		}

		/* drop tracking entry regardless of HW removal outcome */
		hlist_del(&fltr->tc_flower_node);
		kfree(fltr);
	}
}

/**
 * ice_remove_q_channels - Remove queue channels for the TCs
 * @vsi: VSI to be configured
 * @rem_fltr: delete advanced switch filter or not
 *
 * Remove queue channels for the TCs
 *
 * Tears down every channel on @vsi: optionally removes channel tc-flower
 * filters, drops ntuple filters, detaches rings/vectors from their channel,
 * releases FDir resources, removes each channel VSI from the scheduler tree,
 * FW and the PF VSI array, and finally resets the main VSI's TC bookkeeping.
 */
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
{
	struct ice_channel *ch, *ch_tmp;
	struct ice_pf *pf = vsi->back;
	int i;

	/* remove all tc-flower based filter if they are channel filters only */
	if (rem_fltr)
		ice_rem_all_chnl_fltrs(pf);

	/* remove ntuple filters since queue configuration is being changed */
	if (vsi->netdev->features & NETIF_F_NTUPLE) {
		struct ice_hw *hw = &pf->hw;

		mutex_lock(&hw->fdir_fltr_lock);
		ice_fdir_del_all_fltrs(vsi);
		mutex_unlock(&hw->fdir_fltr_lock);
	}

	/* perform cleanup for channels if they exist */
	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		struct ice_vsi *ch_vsi;

		list_del(&ch->list);
		ch_vsi = ch->ch_vsi;
		if (!ch_vsi) {
			kfree(ch);
			continue;
		}

		/* Reset queue contexts */
		for (i = 0; i < ch->num_rxq; i++) {
			struct ice_tx_ring *tx_ring;
			struct ice_rx_ring *rx_ring;

			tx_ring = vsi->tx_rings[ch->base_q + i];
			rx_ring = vsi->rx_rings[ch->base_q + i];
			if (tx_ring) {
				tx_ring->ch = NULL;
				if (tx_ring->q_vector)
					tx_ring->q_vector->ch = NULL;
			}
			if (rx_ring) {
				rx_ring->ch = NULL;
				if (rx_ring->q_vector)
					rx_ring->q_vector->ch = NULL;
			}
		}

		/* Release FD resources for the channel VSI */
		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);

		/* clear the VSI from scheduler tree */
		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);

		/* Delete VSI from FW */
		ice_vsi_delete(ch->ch_vsi);

		/* Delete VSI from PF and HW VSI arrays */
		ice_vsi_clear(ch->ch_vsi);

		/* free the channel */
		kfree(ch);
	}

	/* clear the channel VSI map which is stored in main VSI */
	ice_for_each_chnl_tc(i)
		vsi->tc_map_vsi[i] = NULL;

	/* reset main VSI's all TC information */
	vsi->all_enatc = 0;
	vsi->all_numtc = 0;
}

/**
 * ice_rebuild_channels - rebuild channel
 * @pf: ptr to PF
 *
 * Recreate channel VSIs and replay filters
 *
 * Called after reset: reapplies the cached TC config to the main VSI,
 * rebuilds each ADQ (channel) VSI, replays its filters and BW limits, and
 * reconfigures RSS. On any failure all channels are torn down via
 * ice_remove_q_channels().
 */
static int ice_rebuild_channels(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *main_vsi;
	bool rem_adv_fltr = true;
	struct ice_channel *ch;
	struct ice_vsi *vsi;
	int tc_idx = 1;
	int i, err;

	main_vsi = ice_get_main_vsi(pf);
	if (!main_vsi)
		return 0;

	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
	    main_vsi->old_numtc == 1)
		return 0; /* nothing to be done */

	/* reconfigure main VSI based on old value of TC and cached values
	 * for MQPRIO opts
	 */
	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
	if (err) {
		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
			main_vsi->old_ena_tc, main_vsi->vsi_num);
		return err;
	}

	/* rebuild ADQ VSIs */
	ice_for_each_vsi(pf, i) {
		enum ice_vsi_type type;

		vsi = pf->vsi[i];
		if (!vsi || vsi->type != ICE_VSI_CHNL)
			continue;

		type = vsi->type;

		/* rebuild ADQ VSI */
		err = ice_vsi_rebuild(vsi, true);
		if (err) {
			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
				ice_vsi_type_str(type), vsi->idx, err);
			goto cleanup;
		}

		/* Re-map HW VSI number, using VSI handle that has been
		 * previously validated in ice_replay_vsi() call above
		 */
		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);

		/* replay filters for the VSI */
		err = ice_replay_vsi(&pf->hw, vsi->idx);
		if (err) {
			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
				ice_vsi_type_str(type), err, vsi->idx);
			/* filters are gone from HW; don't try to remove them
			 * again during cleanup
			 */
			rem_adv_fltr = false;
			goto cleanup;
		}
		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
			 ice_vsi_type_str(type), vsi->idx);

		/* store ADQ VSI at correct TC index in main VSI's
		 * map of TC to VSI
		 */
		main_vsi->tc_map_vsi[tc_idx++] = vsi;
	}

	/* ADQ VSI(s) has been rebuilt successfully, so setup
	 * channel for main VSI's Tx and Rx rings
	 */
	list_for_each_entry(ch, &main_vsi->ch_list, list) {
		struct ice_vsi *ch_vsi;

		ch_vsi = ch->ch_vsi;
		if (!ch_vsi)
			continue;

		/* reconfig channel resources */
		ice_cfg_chnl_all_res(main_vsi, ch);

		/* replay BW rate limit if it is non-zero */
		if (!ch->max_tx_rate && !ch->min_tx_rate)
			continue;

		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
				       ch->min_tx_rate);
		if (err)
			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
				err, ch->max_tx_rate, ch->min_tx_rate,
				ch_vsi->vsi_num);
		else
			dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->min_tx_rate,
				ch_vsi->vsi_num);
	}

	/* reconfig RSS for main VSI */
	if (main_vsi->ch_rss_size)
		ice_vsi_cfg_rss_lut_key(main_vsi);

	return 0;

cleanup:
	ice_remove_q_channels(main_vsi, rem_adv_fltr);
	return err;
}

/**
 * ice_create_q_channels - Add queue channel for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures queue channel mapping to the given TCs
 *
 * Returns 0 on success; on failure all channels created so far are removed.
 */
static int ice_create_q_channels(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_channel *ch;
	int ret = 0, i;

	ice_for_each_chnl_tc(i) {
		if (!(vsi->all_enatc & BIT(i)))
			continue;

		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
		if (!ch) {
			ret = -ENOMEM;
			goto err_free;
		}
		INIT_LIST_HEAD(&ch->list);
		/* queue counts/offsets come from the cached mqprio options */
		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];

		/* convert to Kbits/s */
		if (ch->max_tx_rate)
			ch->max_tx_rate = div_u64(ch->max_tx_rate,
						  ICE_BW_KBPS_DIVISOR);
		if (ch->min_tx_rate)
			ch->min_tx_rate = div_u64(ch->min_tx_rate,
						  ICE_BW_KBPS_DIVISOR);

		ret =
		      ice_create_q_channel(vsi, ch);
		if (ret) {
			dev_err(ice_pf_to_dev(pf),
				"failed creating channel TC:%d\n", i);
			kfree(ch);
			goto err_free;
		}
		list_add_tail(&ch->list, &vsi->ch_list);
		vsi->tc_map_vsi[i] = ch->ch_vsi;
		dev_dbg(ice_pf_to_dev(pf),
			"successfully created channel: VSI %pK\n", ch->ch_vsi);
	}
	return 0;

err_free:
	ice_remove_q_channels(vsi, false);

	return ret;
}

/**
 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: TC offload data
 *
 * Validates and caches the mqprio options, pauses the VSI, rebuilds it with
 * the requested queue layout, applies per-TC config, TC0 rate limits and
 * channel VSIs, then resumes the VSI. Returns 0 on success or a negative
 * error code.
 */
static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 mode, ena_tc_qdisc = 0;
	int cur_txq, cur_rxq;
	u8 hw = 0, num_tcf;
	struct device *dev;
	int ret, i;

	dev = ice_pf_to_dev(pf);
	num_tcf = mqprio_qopt->qopt.num_tc;
	hw = mqprio_qopt->qopt.hw;
	mode = mqprio_qopt->mode;
	if (!hw) {
		/* qdisc is being deleted: clear ADQ state and fall through to
		 * the plain TC reconfiguration path
		 */
		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		vsi->ch_rss_size = 0;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		goto config_tcf;
	}

	/* Generate queue region map for number of TCF requested */
	for (i = 0; i < num_tcf; i++)
		ena_tc_qdisc |= BIT(i);

	switch (mode) {
	case TC_MQPRIO_MODE_CHANNEL:

		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
		if (ret) {
			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
				   ret);
			return ret;
		}
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		/* don't assume state of hw_tc_offload during driver load
		 * and set the flag for TC flower filter if hw_tc_offload
		 * already ON
		 */
		if (vsi->netdev->features & NETIF_F_HW_TC)
			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
		break;
	default:
		return -EINVAL;
	}

config_tcf:

	/* Requesting same TCF configuration as already enabled */
	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
	    mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	/* Pause VSI queues */
	ice_dis_vsi(vsi, true);

	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		ice_remove_q_channels(vsi, true);

	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		/* qdisc delete: restore default queue counts */
		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
				     num_online_cpus());
		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
				     num_online_cpus());
	} else {
		/* logic to rebuild VSI, same like ethtool -L */
		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;

		/* after the loop, offset/qcount belong to the last enabled
		 * TC, so offset + qcount is the total queue count needed
		 */
		for (i = 0; i < num_tcf; i++) {
			if (!(ena_tc_qdisc & BIT(i)))
				continue;

			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
		}
		vsi->req_txq = offset + qcount_tx;
		vsi->req_rxq = offset + qcount_rx;

		/* store away original rss_size info, so that it gets reused
		 * from ice_vsi_rebuild during tc-qdisc delete stage - to
		 * determine, what should be the rss_size for main VSI
		 */
		vsi->orig_rss_size = vsi->rss_size;
	}

	/* save current values of Tx and Rx queues before calling VSI rebuild
	 * for fallback option
	 */
	cur_txq = vsi->num_txq;
	cur_rxq = vsi->num_rxq;

	/* proceed with rebuild main VSI using correct number of queues */
	ret = ice_vsi_rebuild(vsi, false);
	if (ret) {
		/* fallback to current number of queues */
		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
		vsi->req_txq = cur_txq;
		vsi->req_rxq = cur_rxq;
		clear_bit(ICE_RESET_FAILED, pf->state);
		if (ice_vsi_rebuild(vsi, false)) {
			dev_err(dev, "Rebuild of main VSI failed again\n");
			return ret;
		}
	}

	vsi->all_numtc = num_tcf;
	vsi->all_enatc = ena_tc_qdisc;
	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
	if (ret) {
		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
			   vsi->vsi_num);
		goto exit;
	}

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];

		/* set TC0 rate limit if specified */
		if (max_tx_rate || min_tx_rate) {
			/* convert to Kbits/s */
			if (max_tx_rate)
				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
			if (min_tx_rate)
				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);

			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
			if (!ret) {
				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
					max_tx_rate, min_tx_rate, vsi->vsi_num);
			} else {
				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
					max_tx_rate, min_tx_rate, vsi->vsi_num);
				goto exit;
			}
		}
		ret = ice_create_q_channels(vsi);
		if (ret) {
			netdev_err(netdev, "failed configuring queue channels\n");
			goto exit;
		} else {
			netdev_dbg(netdev, "successfully configured channels\n");
		}
	}

	if (vsi->ch_rss_size)
		ice_vsi_cfg_rss_lut_key(vsi);

exit:
	/* if error, reset the all_numtc and all_enatc */
	if (ret) {
		vsi->all_numtc = 0;
		vsi->all_enatc = 0;
	}
	/* resume VSI */
	ice_ena_vsi(vsi, true);

	return ret;
}

static LIST_HEAD(ice_block_cb_list);

static int
ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
	     void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	int err;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &ice_block_cb_list,
						  ice_setup_tc_block_cb,
						  np, np, true);
	case TC_SETUP_QDISC_MQPRIO:
		/* setup traffic classifier for receive side */
		mutex_lock(&pf->tc_mutex);
		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
		mutex_unlock(&pf->tc_mutex);
		return err;
	default:
		return -EOPNOTSUPP;
	}
	/* not reached: every switch case returns */
	return -EOPNOTSUPP;
}

/* find the tracking entry for @netdev in this PF's indirect-block list;
 * returns NULL when not present
 */
static struct ice_indr_block_priv *
ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
			   struct net_device *netdev)
{
	struct ice_indr_block_priv *cb_priv;

	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
		if (!cb_priv->netdev)
			return NULL;
		if (cb_priv->netdev == netdev)
			return cb_priv;
	}
	return NULL;
}

static int
ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
			void *indr_priv)
{
	struct ice_indr_block_priv *priv = indr_priv;
	struct ice_netdev_priv *np = priv->np;
Swiatkowski switch (type) { 8310195bb48fSMichal Swiatkowski case TC_SETUP_CLSFLOWER: 8311195bb48fSMichal Swiatkowski return ice_setup_tc_cls_flower(np, priv->netdev, 8312195bb48fSMichal Swiatkowski (struct flow_cls_offload *) 8313195bb48fSMichal Swiatkowski type_data); 8314195bb48fSMichal Swiatkowski default: 8315195bb48fSMichal Swiatkowski return -EOPNOTSUPP; 8316195bb48fSMichal Swiatkowski } 8317195bb48fSMichal Swiatkowski } 8318195bb48fSMichal Swiatkowski 8319195bb48fSMichal Swiatkowski static int 8320195bb48fSMichal Swiatkowski ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch, 8321195bb48fSMichal Swiatkowski struct ice_netdev_priv *np, 8322195bb48fSMichal Swiatkowski struct flow_block_offload *f, void *data, 8323195bb48fSMichal Swiatkowski void (*cleanup)(struct flow_block_cb *block_cb)) 8324195bb48fSMichal Swiatkowski { 8325195bb48fSMichal Swiatkowski struct ice_indr_block_priv *indr_priv; 8326195bb48fSMichal Swiatkowski struct flow_block_cb *block_cb; 8327195bb48fSMichal Swiatkowski 83289e300987SMichal Swiatkowski if (!ice_is_tunnel_supported(netdev) && 83299e300987SMichal Swiatkowski !(is_vlan_dev(netdev) && 83309e300987SMichal Swiatkowski vlan_dev_real_dev(netdev) == np->vsi->netdev)) 83319e300987SMichal Swiatkowski return -EOPNOTSUPP; 83329e300987SMichal Swiatkowski 8333195bb48fSMichal Swiatkowski if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 8334195bb48fSMichal Swiatkowski return -EOPNOTSUPP; 8335195bb48fSMichal Swiatkowski 8336195bb48fSMichal Swiatkowski switch (f->command) { 8337195bb48fSMichal Swiatkowski case FLOW_BLOCK_BIND: 8338195bb48fSMichal Swiatkowski indr_priv = ice_indr_block_priv_lookup(np, netdev); 8339195bb48fSMichal Swiatkowski if (indr_priv) 8340195bb48fSMichal Swiatkowski return -EEXIST; 8341195bb48fSMichal Swiatkowski 8342195bb48fSMichal Swiatkowski indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL); 8343195bb48fSMichal Swiatkowski if (!indr_priv) 8344195bb48fSMichal Swiatkowski return -ENOMEM; 
8345195bb48fSMichal Swiatkowski 8346195bb48fSMichal Swiatkowski indr_priv->netdev = netdev; 8347195bb48fSMichal Swiatkowski indr_priv->np = np; 8348195bb48fSMichal Swiatkowski list_add(&indr_priv->list, &np->tc_indr_block_priv_list); 8349195bb48fSMichal Swiatkowski 8350195bb48fSMichal Swiatkowski block_cb = 8351195bb48fSMichal Swiatkowski flow_indr_block_cb_alloc(ice_indr_setup_block_cb, 8352195bb48fSMichal Swiatkowski indr_priv, indr_priv, 8353195bb48fSMichal Swiatkowski ice_rep_indr_tc_block_unbind, 8354195bb48fSMichal Swiatkowski f, netdev, sch, data, np, 8355195bb48fSMichal Swiatkowski cleanup); 8356195bb48fSMichal Swiatkowski 8357195bb48fSMichal Swiatkowski if (IS_ERR(block_cb)) { 8358195bb48fSMichal Swiatkowski list_del(&indr_priv->list); 8359195bb48fSMichal Swiatkowski kfree(indr_priv); 8360195bb48fSMichal Swiatkowski return PTR_ERR(block_cb); 8361195bb48fSMichal Swiatkowski } 8362195bb48fSMichal Swiatkowski flow_block_cb_add(block_cb, f); 8363195bb48fSMichal Swiatkowski list_add_tail(&block_cb->driver_list, &ice_block_cb_list); 8364195bb48fSMichal Swiatkowski break; 8365195bb48fSMichal Swiatkowski case FLOW_BLOCK_UNBIND: 8366195bb48fSMichal Swiatkowski indr_priv = ice_indr_block_priv_lookup(np, netdev); 8367195bb48fSMichal Swiatkowski if (!indr_priv) 8368195bb48fSMichal Swiatkowski return -ENOENT; 8369195bb48fSMichal Swiatkowski 8370195bb48fSMichal Swiatkowski block_cb = flow_block_cb_lookup(f->block, 8371195bb48fSMichal Swiatkowski ice_indr_setup_block_cb, 8372195bb48fSMichal Swiatkowski indr_priv); 8373195bb48fSMichal Swiatkowski if (!block_cb) 8374195bb48fSMichal Swiatkowski return -ENOENT; 8375195bb48fSMichal Swiatkowski 8376195bb48fSMichal Swiatkowski flow_indr_block_cb_remove(block_cb, f); 8377195bb48fSMichal Swiatkowski 8378195bb48fSMichal Swiatkowski list_del(&block_cb->driver_list); 8379195bb48fSMichal Swiatkowski break; 8380195bb48fSMichal Swiatkowski default: 8381195bb48fSMichal Swiatkowski return -EOPNOTSUPP; 8382195bb48fSMichal Swiatkowski } 
8383195bb48fSMichal Swiatkowski return 0; 8384195bb48fSMichal Swiatkowski } 8385195bb48fSMichal Swiatkowski 8386195bb48fSMichal Swiatkowski static int 8387195bb48fSMichal Swiatkowski ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, 8388195bb48fSMichal Swiatkowski void *cb_priv, enum tc_setup_type type, void *type_data, 8389195bb48fSMichal Swiatkowski void *data, 8390195bb48fSMichal Swiatkowski void (*cleanup)(struct flow_block_cb *block_cb)) 8391195bb48fSMichal Swiatkowski { 8392195bb48fSMichal Swiatkowski switch (type) { 8393195bb48fSMichal Swiatkowski case TC_SETUP_BLOCK: 8394195bb48fSMichal Swiatkowski return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data, 8395195bb48fSMichal Swiatkowski data, cleanup); 8396195bb48fSMichal Swiatkowski 8397195bb48fSMichal Swiatkowski default: 8398195bb48fSMichal Swiatkowski return -EOPNOTSUPP; 8399195bb48fSMichal Swiatkowski } 8400195bb48fSMichal Swiatkowski } 8401195bb48fSMichal Swiatkowski 84020d08a441SKiran Patil /** 8403cdedef59SAnirudh Venkataramanan * ice_open - Called when a network interface becomes active 8404cdedef59SAnirudh Venkataramanan * @netdev: network interface device structure 8405cdedef59SAnirudh Venkataramanan * 8406cdedef59SAnirudh Venkataramanan * The open entry point is called when a network interface is made 8407cdedef59SAnirudh Venkataramanan * active by the system (IFF_UP). At this point all resources needed 8408cdedef59SAnirudh Venkataramanan * for transmit and receive operations are allocated, the interrupt 8409cdedef59SAnirudh Venkataramanan * handler is registered with the OS, the netdev watchdog is enabled, 8410cdedef59SAnirudh Venkataramanan * and the stack is notified that the interface is ready. 
8411cdedef59SAnirudh Venkataramanan * 8412cdedef59SAnirudh Venkataramanan * Returns 0 on success, negative value on failure 8413cdedef59SAnirudh Venkataramanan */ 84140e674aebSAnirudh Venkataramanan int ice_open(struct net_device *netdev) 8415cdedef59SAnirudh Venkataramanan { 8416cdedef59SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 8417e95fc857SKrzysztof Goreczny struct ice_pf *pf = np->vsi->back; 8418e95fc857SKrzysztof Goreczny 8419e95fc857SKrzysztof Goreczny if (ice_is_reset_in_progress(pf->state)) { 8420e95fc857SKrzysztof Goreczny netdev_err(netdev, "can't open net device while reset is in progress"); 8421e95fc857SKrzysztof Goreczny return -EBUSY; 8422e95fc857SKrzysztof Goreczny } 8423e95fc857SKrzysztof Goreczny 8424e95fc857SKrzysztof Goreczny return ice_open_internal(netdev); 8425e95fc857SKrzysztof Goreczny } 8426e95fc857SKrzysztof Goreczny 8427e95fc857SKrzysztof Goreczny /** 8428e95fc857SKrzysztof Goreczny * ice_open_internal - Called when a network interface becomes active 8429e95fc857SKrzysztof Goreczny * @netdev: network interface device structure 8430e95fc857SKrzysztof Goreczny * 8431e95fc857SKrzysztof Goreczny * Internal ice_open implementation. 
Should not be used directly except for ice_open and reset 8432e95fc857SKrzysztof Goreczny * handling routine 8433e95fc857SKrzysztof Goreczny * 8434e95fc857SKrzysztof Goreczny * Returns 0 on success, negative value on failure 8435e95fc857SKrzysztof Goreczny */ 8436e95fc857SKrzysztof Goreczny int ice_open_internal(struct net_device *netdev) 8437e95fc857SKrzysztof Goreczny { 8438e95fc857SKrzysztof Goreczny struct ice_netdev_priv *np = netdev_priv(netdev); 8439cdedef59SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 8440de75135bSAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 84416d599946STony Nguyen struct ice_port_info *pi; 8442cdedef59SAnirudh Venkataramanan int err; 8443cdedef59SAnirudh Venkataramanan 84447e408e07SAnirudh Venkataramanan if (test_bit(ICE_NEEDS_RESTART, pf->state)) { 84450f9d5027SAnirudh Venkataramanan netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); 84460f9d5027SAnirudh Venkataramanan return -EIO; 84470f9d5027SAnirudh Venkataramanan } 84480f9d5027SAnirudh Venkataramanan 8449cdedef59SAnirudh Venkataramanan netif_carrier_off(netdev); 8450cdedef59SAnirudh Venkataramanan 84516d599946STony Nguyen pi = vsi->port_info; 84522ccc1c1cSTony Nguyen err = ice_update_link_info(pi); 84532ccc1c1cSTony Nguyen if (err) { 84542ccc1c1cSTony Nguyen netdev_err(netdev, "Failed to get link info, error %d\n", err); 8455c1484691STony Nguyen return err; 84566d599946STony Nguyen } 84576d599946STony Nguyen 845899d40752SBrett Creeley ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); 8459c77849f5SAnirudh Venkataramanan 84606d599946STony Nguyen /* Set PHY if there is media, otherwise, turn off PHY */ 84616d599946STony Nguyen if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 84621a3571b5SPaul Greenwalt clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); 84637e408e07SAnirudh Venkataramanan if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { 84641a3571b5SPaul Greenwalt err = ice_init_phy_user_cfg(pi); 84651a3571b5SPaul Greenwalt if (err) { 
84661a3571b5SPaul Greenwalt netdev_err(netdev, "Failed to initialize PHY settings, error %d\n", 84671a3571b5SPaul Greenwalt err); 84681a3571b5SPaul Greenwalt return err; 84691a3571b5SPaul Greenwalt } 84701a3571b5SPaul Greenwalt } 84711a3571b5SPaul Greenwalt 84721a3571b5SPaul Greenwalt err = ice_configure_phy(vsi); 8473b6f934f0SBrett Creeley if (err) { 847419cce2c6SAnirudh Venkataramanan netdev_err(netdev, "Failed to set physical link up, error %d\n", 84756d599946STony Nguyen err); 8476b6f934f0SBrett Creeley return err; 8477b6f934f0SBrett Creeley } 84786d599946STony Nguyen } else { 84791a3571b5SPaul Greenwalt set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 8480d348d517SAnirudh Venkataramanan ice_set_link(vsi, false); 84816d599946STony Nguyen } 8482cdedef59SAnirudh Venkataramanan 8483b6f934f0SBrett Creeley err = ice_vsi_open(vsi); 8484cdedef59SAnirudh Venkataramanan if (err) 8485cdedef59SAnirudh Venkataramanan netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", 8486cdedef59SAnirudh Venkataramanan vsi->vsi_num, vsi->vsw->sw_id); 8487a4e82a81STony Nguyen 8488a4e82a81STony Nguyen /* Update existing tunnels information */ 8489a4e82a81STony Nguyen udp_tunnel_get_rx_info(netdev); 8490a4e82a81STony Nguyen 8491cdedef59SAnirudh Venkataramanan return err; 8492cdedef59SAnirudh Venkataramanan } 8493cdedef59SAnirudh Venkataramanan 8494cdedef59SAnirudh Venkataramanan /** 8495cdedef59SAnirudh Venkataramanan * ice_stop - Disables a network interface 8496cdedef59SAnirudh Venkataramanan * @netdev: network interface device structure 8497cdedef59SAnirudh Venkataramanan * 8498cdedef59SAnirudh Venkataramanan * The stop entry point is called when an interface is de-activated by the OS, 8499cdedef59SAnirudh Venkataramanan * and the netdevice enters the DOWN state. The hardware is still under the 8500cdedef59SAnirudh Venkataramanan * driver's control, but the netdev interface is disabled. 
8501cdedef59SAnirudh Venkataramanan * 8502cdedef59SAnirudh Venkataramanan * Returns success only - not allowed to fail 8503cdedef59SAnirudh Venkataramanan */ 85040e674aebSAnirudh Venkataramanan int ice_stop(struct net_device *netdev) 8505cdedef59SAnirudh Venkataramanan { 8506cdedef59SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 8507cdedef59SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 8508e95fc857SKrzysztof Goreczny struct ice_pf *pf = vsi->back; 8509e95fc857SKrzysztof Goreczny 8510e95fc857SKrzysztof Goreczny if (ice_is_reset_in_progress(pf->state)) { 8511e95fc857SKrzysztof Goreczny netdev_err(netdev, "can't stop net device while reset is in progress"); 8512e95fc857SKrzysztof Goreczny return -EBUSY; 8513e95fc857SKrzysztof Goreczny } 8514cdedef59SAnirudh Venkataramanan 8515cdedef59SAnirudh Venkataramanan ice_vsi_close(vsi); 8516cdedef59SAnirudh Venkataramanan 8517cdedef59SAnirudh Venkataramanan return 0; 8518cdedef59SAnirudh Venkataramanan } 8519cdedef59SAnirudh Venkataramanan 8520e94d4478SAnirudh Venkataramanan /** 8521e94d4478SAnirudh Venkataramanan * ice_features_check - Validate encapsulated packet conforms to limits 8522e94d4478SAnirudh Venkataramanan * @skb: skb buffer 8523e94d4478SAnirudh Venkataramanan * @netdev: This port's netdev 8524e94d4478SAnirudh Venkataramanan * @features: Offload features that the stack believes apply 8525e94d4478SAnirudh Venkataramanan */ 8526e94d4478SAnirudh Venkataramanan static netdev_features_t 8527e94d4478SAnirudh Venkataramanan ice_features_check(struct sk_buff *skb, 8528e94d4478SAnirudh Venkataramanan struct net_device __always_unused *netdev, 8529e94d4478SAnirudh Venkataramanan netdev_features_t features) 8530e94d4478SAnirudh Venkataramanan { 853146b699c5SJesse Brandeburg bool gso = skb_is_gso(skb); 8532e94d4478SAnirudh Venkataramanan size_t len; 8533e94d4478SAnirudh Venkataramanan 8534e94d4478SAnirudh Venkataramanan /* No point in doing any of this if neither checksum nor GSO are 
8535e94d4478SAnirudh Venkataramanan * being requested for this frame. We can rule out both by just 8536e94d4478SAnirudh Venkataramanan * checking for CHECKSUM_PARTIAL 8537e94d4478SAnirudh Venkataramanan */ 8538e94d4478SAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 8539e94d4478SAnirudh Venkataramanan return features; 8540e94d4478SAnirudh Venkataramanan 8541e94d4478SAnirudh Venkataramanan /* We cannot support GSO if the MSS is going to be less than 8542e94d4478SAnirudh Venkataramanan * 64 bytes. If it is then we need to drop support for GSO. 8543e94d4478SAnirudh Venkataramanan */ 854446b699c5SJesse Brandeburg if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS)) 8545e94d4478SAnirudh Venkataramanan features &= ~NETIF_F_GSO_MASK; 8546e94d4478SAnirudh Venkataramanan 854746b699c5SJesse Brandeburg len = skb_network_offset(skb); 8548a4e82a81STony Nguyen if (len > ICE_TXD_MACLEN_MAX || len & 0x1) 8549e94d4478SAnirudh Venkataramanan goto out_rm_features; 8550e94d4478SAnirudh Venkataramanan 855146b699c5SJesse Brandeburg len = skb_network_header_len(skb); 8552a4e82a81STony Nguyen if (len > ICE_TXD_IPLEN_MAX || len & 0x1) 8553e94d4478SAnirudh Venkataramanan goto out_rm_features; 8554e94d4478SAnirudh Venkataramanan 8555e94d4478SAnirudh Venkataramanan if (skb->encapsulation) { 855646b699c5SJesse Brandeburg /* this must work for VXLAN frames AND IPIP/SIT frames, and in 855746b699c5SJesse Brandeburg * the case of IPIP frames, the transport header pointer is 855846b699c5SJesse Brandeburg * after the inner header! So check to make sure that this 855946b699c5SJesse Brandeburg * is a GRE or UDP_TUNNEL frame before doing that math. 
856046b699c5SJesse Brandeburg */ 856146b699c5SJesse Brandeburg if (gso && (skb_shinfo(skb)->gso_type & 856246b699c5SJesse Brandeburg (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) { 856346b699c5SJesse Brandeburg len = skb_inner_network_header(skb) - 856446b699c5SJesse Brandeburg skb_transport_header(skb); 8565a4e82a81STony Nguyen if (len > ICE_TXD_L4LEN_MAX || len & 0x1) 8566e94d4478SAnirudh Venkataramanan goto out_rm_features; 856746b699c5SJesse Brandeburg } 8568e94d4478SAnirudh Venkataramanan 856946b699c5SJesse Brandeburg len = skb_inner_network_header_len(skb); 8570a4e82a81STony Nguyen if (len > ICE_TXD_IPLEN_MAX || len & 0x1) 8571e94d4478SAnirudh Venkataramanan goto out_rm_features; 8572e94d4478SAnirudh Venkataramanan } 8573e94d4478SAnirudh Venkataramanan 8574e94d4478SAnirudh Venkataramanan return features; 8575e94d4478SAnirudh Venkataramanan out_rm_features: 8576e94d4478SAnirudh Venkataramanan return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 8577e94d4478SAnirudh Venkataramanan } 8578e94d4478SAnirudh Venkataramanan 8579462acf6aSTony Nguyen static const struct net_device_ops ice_netdev_safe_mode_ops = { 8580462acf6aSTony Nguyen .ndo_open = ice_open, 8581462acf6aSTony Nguyen .ndo_stop = ice_stop, 8582462acf6aSTony Nguyen .ndo_start_xmit = ice_start_xmit, 8583462acf6aSTony Nguyen .ndo_set_mac_address = ice_set_mac_address, 8584462acf6aSTony Nguyen .ndo_validate_addr = eth_validate_addr, 8585462acf6aSTony Nguyen .ndo_change_mtu = ice_change_mtu, 8586462acf6aSTony Nguyen .ndo_get_stats64 = ice_get_stats64, 8587462acf6aSTony Nguyen .ndo_tx_timeout = ice_tx_timeout, 8588ebc5399eSMaciej Fijalkowski .ndo_bpf = ice_xdp_safe_mode, 8589462acf6aSTony Nguyen }; 8590462acf6aSTony Nguyen 8591cdedef59SAnirudh Venkataramanan static const struct net_device_ops ice_netdev_ops = { 8592cdedef59SAnirudh Venkataramanan .ndo_open = ice_open, 8593cdedef59SAnirudh Venkataramanan .ndo_stop = ice_stop, 85942b245cb2SAnirudh Venkataramanan .ndo_start_xmit = ice_start_xmit, 85952a87bd73SDave 
Ertman .ndo_select_queue = ice_select_queue, 8596e94d4478SAnirudh Venkataramanan .ndo_features_check = ice_features_check, 8597e94d4478SAnirudh Venkataramanan .ndo_set_rx_mode = ice_set_rx_mode, 8598e94d4478SAnirudh Venkataramanan .ndo_set_mac_address = ice_set_mac_address, 8599e94d4478SAnirudh Venkataramanan .ndo_validate_addr = eth_validate_addr, 8600e94d4478SAnirudh Venkataramanan .ndo_change_mtu = ice_change_mtu, 8601fcea6f3dSAnirudh Venkataramanan .ndo_get_stats64 = ice_get_stats64, 86021ddef455SUsha Ketineni .ndo_set_tx_maxrate = ice_set_tx_maxrate, 8603a7605370SArnd Bergmann .ndo_eth_ioctl = ice_eth_ioctl, 86047c710869SAnirudh Venkataramanan .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, 86057c710869SAnirudh Venkataramanan .ndo_set_vf_mac = ice_set_vf_mac, 86067c710869SAnirudh Venkataramanan .ndo_get_vf_config = ice_get_vf_cfg, 86077c710869SAnirudh Venkataramanan .ndo_set_vf_trust = ice_set_vf_trust, 86087c710869SAnirudh Venkataramanan .ndo_set_vf_vlan = ice_set_vf_port_vlan, 86097c710869SAnirudh Venkataramanan .ndo_set_vf_link_state = ice_set_vf_link_state, 8610730fdea4SJesse Brandeburg .ndo_get_vf_stats = ice_get_vf_stats, 86114ecc8633SBrett Creeley .ndo_set_vf_rate = ice_set_vf_bw, 8612d76a60baSAnirudh Venkataramanan .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, 8613d76a60baSAnirudh Venkataramanan .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, 86140d08a441SKiran Patil .ndo_setup_tc = ice_setup_tc, 8615d76a60baSAnirudh Venkataramanan .ndo_set_features = ice_set_features, 8616b1edc14aSMd Fahad Iqbal Polash .ndo_bridge_getlink = ice_bridge_getlink, 8617b1edc14aSMd Fahad Iqbal Polash .ndo_bridge_setlink = ice_bridge_setlink, 8618e94d4478SAnirudh Venkataramanan .ndo_fdb_add = ice_fdb_add, 8619e94d4478SAnirudh Venkataramanan .ndo_fdb_del = ice_fdb_del, 862028bf2672SBrett Creeley #ifdef CONFIG_RFS_ACCEL 862128bf2672SBrett Creeley .ndo_rx_flow_steer = ice_rx_flow_steer, 862228bf2672SBrett Creeley #endif 8623b3969fd7SSudheer Mogilappagari .ndo_tx_timeout = 
ice_tx_timeout, 8624efc2214bSMaciej Fijalkowski .ndo_bpf = ice_xdp, 8625efc2214bSMaciej Fijalkowski .ndo_xdp_xmit = ice_xdp_xmit, 86262d4238f5SKrzysztof Kazimierczak .ndo_xsk_wakeup = ice_xsk_wakeup, 8627cdedef59SAnirudh Venkataramanan }; 8628