1eff380aaSAnirudh Venkataramanan // SPDX-License-Identifier: GPL-2.0 2eff380aaSAnirudh Venkataramanan /* Copyright (c) 2019, Intel Corporation. */ 3eff380aaSAnirudh Venkataramanan 4eff380aaSAnirudh Venkataramanan #include "ice_base.h" 5eff380aaSAnirudh Venkataramanan #include "ice_dcb_lib.h" 6eff380aaSAnirudh Venkataramanan 7eff380aaSAnirudh Venkataramanan /** 8eff380aaSAnirudh Venkataramanan * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI 9eff380aaSAnirudh Venkataramanan * @qs_cfg: gathered variables needed for PF->VSI queues assignment 10eff380aaSAnirudh Venkataramanan * 11eff380aaSAnirudh Venkataramanan * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap 12eff380aaSAnirudh Venkataramanan */ 13eff380aaSAnirudh Venkataramanan static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg) 14eff380aaSAnirudh Venkataramanan { 15eff380aaSAnirudh Venkataramanan int offset, i; 16eff380aaSAnirudh Venkataramanan 17eff380aaSAnirudh Venkataramanan mutex_lock(qs_cfg->qs_mutex); 18eff380aaSAnirudh Venkataramanan offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size, 19eff380aaSAnirudh Venkataramanan 0, qs_cfg->q_count, 0); 20eff380aaSAnirudh Venkataramanan if (offset >= qs_cfg->pf_map_size) { 21eff380aaSAnirudh Venkataramanan mutex_unlock(qs_cfg->qs_mutex); 22eff380aaSAnirudh Venkataramanan return -ENOMEM; 23eff380aaSAnirudh Venkataramanan } 24eff380aaSAnirudh Venkataramanan 25eff380aaSAnirudh Venkataramanan bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count); 26eff380aaSAnirudh Venkataramanan for (i = 0; i < qs_cfg->q_count; i++) 27eff380aaSAnirudh Venkataramanan qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset; 28eff380aaSAnirudh Venkataramanan mutex_unlock(qs_cfg->qs_mutex); 29eff380aaSAnirudh Venkataramanan 30eff380aaSAnirudh Venkataramanan return 0; 31eff380aaSAnirudh Venkataramanan } 32eff380aaSAnirudh Venkataramanan 33eff380aaSAnirudh Venkataramanan /** 34eff380aaSAnirudh Venkataramanan * 
__ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI 35eff380aaSAnirudh Venkataramanan * @qs_cfg: gathered variables needed for pf->vsi queues assignment 36eff380aaSAnirudh Venkataramanan * 37eff380aaSAnirudh Venkataramanan * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap 38eff380aaSAnirudh Venkataramanan */ 39eff380aaSAnirudh Venkataramanan static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg) 40eff380aaSAnirudh Venkataramanan { 41eff380aaSAnirudh Venkataramanan int i, index = 0; 42eff380aaSAnirudh Venkataramanan 43eff380aaSAnirudh Venkataramanan mutex_lock(qs_cfg->qs_mutex); 44eff380aaSAnirudh Venkataramanan for (i = 0; i < qs_cfg->q_count; i++) { 45eff380aaSAnirudh Venkataramanan index = find_next_zero_bit(qs_cfg->pf_map, 46eff380aaSAnirudh Venkataramanan qs_cfg->pf_map_size, index); 47eff380aaSAnirudh Venkataramanan if (index >= qs_cfg->pf_map_size) 48eff380aaSAnirudh Venkataramanan goto err_scatter; 49eff380aaSAnirudh Venkataramanan set_bit(index, qs_cfg->pf_map); 50eff380aaSAnirudh Venkataramanan qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index; 51eff380aaSAnirudh Venkataramanan } 52eff380aaSAnirudh Venkataramanan mutex_unlock(qs_cfg->qs_mutex); 53eff380aaSAnirudh Venkataramanan 54eff380aaSAnirudh Venkataramanan return 0; 55eff380aaSAnirudh Venkataramanan err_scatter: 56eff380aaSAnirudh Venkataramanan for (index = 0; index < i; index++) { 57eff380aaSAnirudh Venkataramanan clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map); 58eff380aaSAnirudh Venkataramanan qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0; 59eff380aaSAnirudh Venkataramanan } 60eff380aaSAnirudh Venkataramanan mutex_unlock(qs_cfg->qs_mutex); 61eff380aaSAnirudh Venkataramanan 62eff380aaSAnirudh Venkataramanan return -ENOMEM; 63eff380aaSAnirudh Venkataramanan } 64eff380aaSAnirudh Venkataramanan 65eff380aaSAnirudh Venkataramanan /** 66eff380aaSAnirudh Venkataramanan * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled 
67eff380aaSAnirudh Venkataramanan * @pf: the PF being configured 68eff380aaSAnirudh Venkataramanan * @pf_q: the PF queue 69eff380aaSAnirudh Venkataramanan * @ena: enable or disable state of the queue 70eff380aaSAnirudh Venkataramanan * 71eff380aaSAnirudh Venkataramanan * This routine will wait for the given Rx queue of the PF to reach the 72eff380aaSAnirudh Venkataramanan * enabled or disabled state. 73eff380aaSAnirudh Venkataramanan * Returns -ETIMEDOUT in case of failing to reach the requested state after 74eff380aaSAnirudh Venkataramanan * multiple retries; else will return 0 in case of success. 75eff380aaSAnirudh Venkataramanan */ 76eff380aaSAnirudh Venkataramanan static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) 77eff380aaSAnirudh Venkataramanan { 78eff380aaSAnirudh Venkataramanan int i; 79eff380aaSAnirudh Venkataramanan 80eff380aaSAnirudh Venkataramanan for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) { 81eff380aaSAnirudh Venkataramanan if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) & 82eff380aaSAnirudh Venkataramanan QRX_CTRL_QENA_STAT_M)) 83eff380aaSAnirudh Venkataramanan return 0; 84eff380aaSAnirudh Venkataramanan 85eff380aaSAnirudh Venkataramanan usleep_range(20, 40); 86eff380aaSAnirudh Venkataramanan } 87eff380aaSAnirudh Venkataramanan 88eff380aaSAnirudh Venkataramanan return -ETIMEDOUT; 89eff380aaSAnirudh Venkataramanan } 90eff380aaSAnirudh Venkataramanan 91eff380aaSAnirudh Venkataramanan /** 92eff380aaSAnirudh Venkataramanan * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector 93eff380aaSAnirudh Venkataramanan * @vsi: the VSI being configured 94eff380aaSAnirudh Venkataramanan * @v_idx: index of the vector in the VSI struct 95eff380aaSAnirudh Venkataramanan * 96118e0e10SMichal Swiatkowski * We allocate one q_vector and set default value for ITR setting associated 97118e0e10SMichal Swiatkowski * with this q_vector. If allocation fails we return -ENOMEM. 
98eff380aaSAnirudh Venkataramanan */ 99eff380aaSAnirudh Venkataramanan static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) 100eff380aaSAnirudh Venkataramanan { 101eff380aaSAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 102eff380aaSAnirudh Venkataramanan struct ice_q_vector *q_vector; 103eff380aaSAnirudh Venkataramanan 104eff380aaSAnirudh Venkataramanan /* allocate q_vector */ 1054015d11eSBrett Creeley q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector), 1064015d11eSBrett Creeley GFP_KERNEL); 107eff380aaSAnirudh Venkataramanan if (!q_vector) 108eff380aaSAnirudh Venkataramanan return -ENOMEM; 109eff380aaSAnirudh Venkataramanan 110eff380aaSAnirudh Venkataramanan q_vector->vsi = vsi; 111eff380aaSAnirudh Venkataramanan q_vector->v_idx = v_idx; 112118e0e10SMichal Swiatkowski q_vector->tx.itr_setting = ICE_DFLT_TX_ITR; 113118e0e10SMichal Swiatkowski q_vector->rx.itr_setting = ICE_DFLT_RX_ITR; 114eff380aaSAnirudh Venkataramanan if (vsi->type == ICE_VSI_VF) 115eff380aaSAnirudh Venkataramanan goto out; 116eff380aaSAnirudh Venkataramanan /* only set affinity_mask if the CPU is online */ 117eff380aaSAnirudh Venkataramanan if (cpu_online(v_idx)) 118eff380aaSAnirudh Venkataramanan cpumask_set_cpu(v_idx, &q_vector->affinity_mask); 119eff380aaSAnirudh Venkataramanan 120eff380aaSAnirudh Venkataramanan /* This will not be called in the driver load path because the netdev 121eff380aaSAnirudh Venkataramanan * will not be created yet. All other cases with register the NAPI 122eff380aaSAnirudh Venkataramanan * handler here (i.e. resume, reset/rebuild, etc.) 
123eff380aaSAnirudh Venkataramanan */ 124eff380aaSAnirudh Venkataramanan if (vsi->netdev) 125eff380aaSAnirudh Venkataramanan netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, 126eff380aaSAnirudh Venkataramanan NAPI_POLL_WEIGHT); 127eff380aaSAnirudh Venkataramanan 128eff380aaSAnirudh Venkataramanan out: 129eff380aaSAnirudh Venkataramanan /* tie q_vector and VSI together */ 130eff380aaSAnirudh Venkataramanan vsi->q_vectors[v_idx] = q_vector; 131eff380aaSAnirudh Venkataramanan 132eff380aaSAnirudh Venkataramanan return 0; 133eff380aaSAnirudh Venkataramanan } 134eff380aaSAnirudh Venkataramanan 135eff380aaSAnirudh Venkataramanan /** 136eff380aaSAnirudh Venkataramanan * ice_free_q_vector - Free memory allocated for a specific interrupt vector 137eff380aaSAnirudh Venkataramanan * @vsi: VSI having the memory freed 138eff380aaSAnirudh Venkataramanan * @v_idx: index of the vector to be freed 139eff380aaSAnirudh Venkataramanan */ 140eff380aaSAnirudh Venkataramanan static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) 141eff380aaSAnirudh Venkataramanan { 142eff380aaSAnirudh Venkataramanan struct ice_q_vector *q_vector; 143eff380aaSAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 144eff380aaSAnirudh Venkataramanan struct ice_ring *ring; 1454015d11eSBrett Creeley struct device *dev; 146eff380aaSAnirudh Venkataramanan 1474015d11eSBrett Creeley dev = ice_pf_to_dev(pf); 148eff380aaSAnirudh Venkataramanan if (!vsi->q_vectors[v_idx]) { 1494015d11eSBrett Creeley dev_dbg(dev, "Queue vector at index %d not found\n", v_idx); 150eff380aaSAnirudh Venkataramanan return; 151eff380aaSAnirudh Venkataramanan } 152eff380aaSAnirudh Venkataramanan q_vector = vsi->q_vectors[v_idx]; 153eff380aaSAnirudh Venkataramanan 154eff380aaSAnirudh Venkataramanan ice_for_each_ring(ring, q_vector->tx) 155eff380aaSAnirudh Venkataramanan ring->q_vector = NULL; 156eff380aaSAnirudh Venkataramanan ice_for_each_ring(ring, q_vector->rx) 157eff380aaSAnirudh Venkataramanan ring->q_vector = NULL; 
158eff380aaSAnirudh Venkataramanan 159eff380aaSAnirudh Venkataramanan /* only VSI with an associated netdev is set up with NAPI */ 160eff380aaSAnirudh Venkataramanan if (vsi->netdev) 161eff380aaSAnirudh Venkataramanan netif_napi_del(&q_vector->napi); 162eff380aaSAnirudh Venkataramanan 1634015d11eSBrett Creeley devm_kfree(dev, q_vector); 164eff380aaSAnirudh Venkataramanan vsi->q_vectors[v_idx] = NULL; 165eff380aaSAnirudh Venkataramanan } 166eff380aaSAnirudh Venkataramanan 167eff380aaSAnirudh Venkataramanan /** 168eff380aaSAnirudh Venkataramanan * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set 169eff380aaSAnirudh Venkataramanan * @hw: board specific structure 170eff380aaSAnirudh Venkataramanan */ 171eff380aaSAnirudh Venkataramanan static void ice_cfg_itr_gran(struct ice_hw *hw) 172eff380aaSAnirudh Venkataramanan { 173eff380aaSAnirudh Venkataramanan u32 regval = rd32(hw, GLINT_CTL); 174eff380aaSAnirudh Venkataramanan 175eff380aaSAnirudh Venkataramanan /* no need to update global register if ITR gran is already set */ 176eff380aaSAnirudh Venkataramanan if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) && 177eff380aaSAnirudh Venkataramanan (((regval & GLINT_CTL_ITR_GRAN_200_M) >> 178eff380aaSAnirudh Venkataramanan GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) && 179eff380aaSAnirudh Venkataramanan (((regval & GLINT_CTL_ITR_GRAN_100_M) >> 180eff380aaSAnirudh Venkataramanan GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) && 181eff380aaSAnirudh Venkataramanan (((regval & GLINT_CTL_ITR_GRAN_50_M) >> 182eff380aaSAnirudh Venkataramanan GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) && 183eff380aaSAnirudh Venkataramanan (((regval & GLINT_CTL_ITR_GRAN_25_M) >> 184eff380aaSAnirudh Venkataramanan GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US)) 185eff380aaSAnirudh Venkataramanan return; 186eff380aaSAnirudh Venkataramanan 187eff380aaSAnirudh Venkataramanan regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) & 188eff380aaSAnirudh Venkataramanan 
GLINT_CTL_ITR_GRAN_200_M) | 189eff380aaSAnirudh Venkataramanan ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) & 190eff380aaSAnirudh Venkataramanan GLINT_CTL_ITR_GRAN_100_M) | 191eff380aaSAnirudh Venkataramanan ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) & 192eff380aaSAnirudh Venkataramanan GLINT_CTL_ITR_GRAN_50_M) | 193eff380aaSAnirudh Venkataramanan ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) & 194eff380aaSAnirudh Venkataramanan GLINT_CTL_ITR_GRAN_25_M); 195eff380aaSAnirudh Venkataramanan wr32(hw, GLINT_CTL, regval); 196eff380aaSAnirudh Venkataramanan } 197eff380aaSAnirudh Venkataramanan 198eff380aaSAnirudh Venkataramanan /** 199e75d1b2cSMaciej Fijalkowski * ice_calc_q_handle - calculate the queue handle 200e75d1b2cSMaciej Fijalkowski * @vsi: VSI that ring belongs to 201e75d1b2cSMaciej Fijalkowski * @ring: ring to get the absolute queue index 202e75d1b2cSMaciej Fijalkowski * @tc: traffic class number 203e75d1b2cSMaciej Fijalkowski */ 204e75d1b2cSMaciej Fijalkowski static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc) 205e75d1b2cSMaciej Fijalkowski { 206efc2214bSMaciej Fijalkowski WARN_ONCE(ice_ring_is_xdp(ring) && tc, 207efc2214bSMaciej Fijalkowski "XDP ring can't belong to TC other than 0"); 208efc2214bSMaciej Fijalkowski 209e75d1b2cSMaciej Fijalkowski /* Idea here for calculation is that we subtract the number of queue 210e75d1b2cSMaciej Fijalkowski * count from TC that ring belongs to from it's absolute queue index 211e75d1b2cSMaciej Fijalkowski * and as a result we get the queue's index within TC. 
/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	/* descriptor ring base address, in ICE_TLAN_CTX_BASE_S granularity */
	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is VF number between 0-256
	 * for vmvf_type = VM, it is VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
		/* fall through */
	case ICE_VSI_PF:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	default:
		/* unsupported VSI type: leave the remaining fields untouched */
		return;
	}

	/* make sure the context is associated with the right VSI */
	tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}
/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
int ice_setup_rx_ctx(struct ice_ring *ring)
{
	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
	struct ice_vsi *vsi = ring->vsi;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	struct ice_hw *hw;
	u32 regval;
	u16 pf_q;
	int err;

	hw = &vsi->back->hw;

	/* what is Rx queue number in global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

	ring->rx_buf_len = vsi->rx_buf_len;

	/* XDP/AF_XDP memory model registration is only done for PF VSIs */
	if (ring->vsi->type == ICE_VSI_PF) {
		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
					 ring->q_index);

		ring->xsk_umem = ice_xsk_umem(ring);
		if (ring->xsk_umem) {
			/* re-register below with the zero-copy memory model */
			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

			/* buffer length comes from the UMEM chunk, minus the
			 * headroom reserved for XDP metadata
			 */
			ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
					   XDP_PACKET_HEADROOM;
			/* For AF_XDP ZC, we disallow packets to span on
			 * multiple buffers, thus letting us skip that
			 * handling in the fast-path.
			 */
			chain_len = 1;
			ring->zca.free = ice_zca_free;
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_ZERO_COPY,
							 &ring->zca);
			if (err)
				return err;

			dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
				 ring->q_index);
		} else {
			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
				xdp_rxq_info_reg(&ring->xdp_rxq,
						 ring->netdev,
						 ring->q_index);

			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err)
				return err;
		}
	}
	/* Receive Queue Base Address.
	 * Indicates the starting address of the descriptor queue defined in
	 * 128 Byte units.
	 */
	rlan_ctx.base = ring->dma >> 7;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = 1;

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
	rlan_ctx.l2tsel = 1;

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers
	 * The VLAN in the inner L2 header is stripped to the receive
	 * descriptor if enabled by this flag.
	 */
	rlan_ctx.showiv = 0;

	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
			       chain_len * ring->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context which
	 * allows this driver to select a specific receive descriptor format
	 */
	if (vsi->type != ICE_VSI_VF) {
		regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
		regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
			QRXFLXP_CNTXT_RXDID_IDX_M;

		/* increasing context priority to pick up profile ID;
		 * default is 0x01; setting to 0x03 to ensure profile
		 * is programming if prev context is of same priority
		 */
		regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
			QRXFLXP_CNTXT_RXDID_PRIO_M;

		wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
	}

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(&vsi->back->pdev->dev,
			"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* configure Rx buffer alignment */
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		ice_clear_ring_build_skb_ena(ring);
	else
		ice_set_ring_build_skb_ena(ring);

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);

	/* prefill the ring; the zero-copy path uses UMEM-backed buffers.
	 * An allocation shortfall is logged but deliberately not treated
	 * as a fatal error.
	 */
	err = ring->xsk_umem ?
	      ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
	      ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
	if (err)
		dev_info(&vsi->back->pdev->dev,
			 "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
			 ring->xsk_umem ? "UMEM enabled " : "",
			 ring->q_index, pf_q);

	return 0;
}
"UMEM enabled " : "", 4312d4238f5SKrzysztof Kazimierczak ring->q_index, pf_q); 432eff380aaSAnirudh Venkataramanan 433eff380aaSAnirudh Venkataramanan return 0; 434eff380aaSAnirudh Venkataramanan } 435eff380aaSAnirudh Venkataramanan 436eff380aaSAnirudh Venkataramanan /** 437eff380aaSAnirudh Venkataramanan * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI 438eff380aaSAnirudh Venkataramanan * @qs_cfg: gathered variables needed for pf->vsi queues assignment 439eff380aaSAnirudh Venkataramanan * 440eff380aaSAnirudh Venkataramanan * This function first tries to find contiguous space. If it is not successful, 441eff380aaSAnirudh Venkataramanan * it tries with the scatter approach. 442eff380aaSAnirudh Venkataramanan * 443eff380aaSAnirudh Venkataramanan * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap 444eff380aaSAnirudh Venkataramanan */ 445eff380aaSAnirudh Venkataramanan int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg) 446eff380aaSAnirudh Venkataramanan { 447eff380aaSAnirudh Venkataramanan int ret = 0; 448eff380aaSAnirudh Venkataramanan 449eff380aaSAnirudh Venkataramanan ret = __ice_vsi_get_qs_contig(qs_cfg); 450eff380aaSAnirudh Venkataramanan if (ret) { 451eff380aaSAnirudh Venkataramanan /* contig failed, so try with scatter approach */ 452eff380aaSAnirudh Venkataramanan qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER; 453eff380aaSAnirudh Venkataramanan qs_cfg->q_count = min_t(u16, qs_cfg->q_count, 454eff380aaSAnirudh Venkataramanan qs_cfg->scatter_count); 455eff380aaSAnirudh Venkataramanan ret = __ice_vsi_get_qs_sc(qs_cfg); 456eff380aaSAnirudh Venkataramanan } 457eff380aaSAnirudh Venkataramanan return ret; 458eff380aaSAnirudh Venkataramanan } 459eff380aaSAnirudh Venkataramanan 460eff380aaSAnirudh Venkataramanan /** 461eff380aaSAnirudh Venkataramanan * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring 462eff380aaSAnirudh Venkataramanan * @vsi: the VSI being configured 463eff380aaSAnirudh Venkataramanan * @ena: start or stop 
the Rx rings 464eff380aaSAnirudh Venkataramanan * @rxq_idx: Rx queue index 465eff380aaSAnirudh Venkataramanan */ 466eff380aaSAnirudh Venkataramanan int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx) 467eff380aaSAnirudh Venkataramanan { 468eff380aaSAnirudh Venkataramanan int pf_q = vsi->rxq_map[rxq_idx]; 469eff380aaSAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 470eff380aaSAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 471eff380aaSAnirudh Venkataramanan int ret = 0; 472eff380aaSAnirudh Venkataramanan u32 rx_reg; 473eff380aaSAnirudh Venkataramanan 474eff380aaSAnirudh Venkataramanan rx_reg = rd32(hw, QRX_CTRL(pf_q)); 475eff380aaSAnirudh Venkataramanan 476eff380aaSAnirudh Venkataramanan /* Skip if the queue is already in the requested state */ 477eff380aaSAnirudh Venkataramanan if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) 478eff380aaSAnirudh Venkataramanan return 0; 479eff380aaSAnirudh Venkataramanan 480eff380aaSAnirudh Venkataramanan /* turn on/off the queue */ 481eff380aaSAnirudh Venkataramanan if (ena) 482eff380aaSAnirudh Venkataramanan rx_reg |= QRX_CTRL_QENA_REQ_M; 483eff380aaSAnirudh Venkataramanan else 484eff380aaSAnirudh Venkataramanan rx_reg &= ~QRX_CTRL_QENA_REQ_M; 485eff380aaSAnirudh Venkataramanan wr32(hw, QRX_CTRL(pf_q), rx_reg); 486eff380aaSAnirudh Venkataramanan 487eff380aaSAnirudh Venkataramanan /* wait for the change to finish */ 488eff380aaSAnirudh Venkataramanan ret = ice_pf_rxq_wait(pf, pf_q, ena); 489eff380aaSAnirudh Venkataramanan if (ret) 4904015d11eSBrett Creeley dev_err(ice_pf_to_dev(pf), 491eff380aaSAnirudh Venkataramanan "VSI idx %d Rx ring %d %sable timeout\n", 492eff380aaSAnirudh Venkataramanan vsi->idx, pf_q, (ena ? 
"en" : "dis")); 493eff380aaSAnirudh Venkataramanan 494eff380aaSAnirudh Venkataramanan return ret; 495eff380aaSAnirudh Venkataramanan } 496eff380aaSAnirudh Venkataramanan 497eff380aaSAnirudh Venkataramanan /** 498eff380aaSAnirudh Venkataramanan * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors 499eff380aaSAnirudh Venkataramanan * @vsi: the VSI being configured 500eff380aaSAnirudh Venkataramanan * 501eff380aaSAnirudh Venkataramanan * We allocate one q_vector per queue interrupt. If allocation fails we 502eff380aaSAnirudh Venkataramanan * return -ENOMEM. 503eff380aaSAnirudh Venkataramanan */ 504eff380aaSAnirudh Venkataramanan int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) 505eff380aaSAnirudh Venkataramanan { 506eff380aaSAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 507eff380aaSAnirudh Venkataramanan int v_idx = 0, num_q_vectors; 5084015d11eSBrett Creeley struct device *dev; 509eff380aaSAnirudh Venkataramanan int err; 510eff380aaSAnirudh Venkataramanan 5114015d11eSBrett Creeley dev = ice_pf_to_dev(pf); 512eff380aaSAnirudh Venkataramanan if (vsi->q_vectors[0]) { 5134015d11eSBrett Creeley dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num); 514eff380aaSAnirudh Venkataramanan return -EEXIST; 515eff380aaSAnirudh Venkataramanan } 516eff380aaSAnirudh Venkataramanan 517eff380aaSAnirudh Venkataramanan num_q_vectors = vsi->num_q_vectors; 518eff380aaSAnirudh Venkataramanan 519eff380aaSAnirudh Venkataramanan for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 520eff380aaSAnirudh Venkataramanan err = ice_vsi_alloc_q_vector(vsi, v_idx); 521eff380aaSAnirudh Venkataramanan if (err) 522eff380aaSAnirudh Venkataramanan goto err_out; 523eff380aaSAnirudh Venkataramanan } 524eff380aaSAnirudh Venkataramanan 525eff380aaSAnirudh Venkataramanan return 0; 526eff380aaSAnirudh Venkataramanan 527eff380aaSAnirudh Venkataramanan err_out: 528eff380aaSAnirudh Venkataramanan while (v_idx--) 529eff380aaSAnirudh Venkataramanan ice_free_q_vector(vsi, v_idx); 
530eff380aaSAnirudh Venkataramanan 5314015d11eSBrett Creeley dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n", 532eff380aaSAnirudh Venkataramanan vsi->num_q_vectors, vsi->vsi_num, err); 533eff380aaSAnirudh Venkataramanan vsi->num_q_vectors = 0; 534eff380aaSAnirudh Venkataramanan return err; 535eff380aaSAnirudh Venkataramanan } 536eff380aaSAnirudh Venkataramanan 537eff380aaSAnirudh Venkataramanan /** 538eff380aaSAnirudh Venkataramanan * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors 539eff380aaSAnirudh Venkataramanan * @vsi: the VSI being configured 540eff380aaSAnirudh Venkataramanan * 541eff380aaSAnirudh Venkataramanan * This function maps descriptor rings to the queue-specific vectors allotted 542eff380aaSAnirudh Venkataramanan * through the MSI-X enabling code. On a constrained vector budget, we map Tx 543eff380aaSAnirudh Venkataramanan * and Rx rings to the vector as "efficiently" as possible. 544eff380aaSAnirudh Venkataramanan */ 545eff380aaSAnirudh Venkataramanan void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) 546eff380aaSAnirudh Venkataramanan { 547eff380aaSAnirudh Venkataramanan int q_vectors = vsi->num_q_vectors; 548eff380aaSAnirudh Venkataramanan int tx_rings_rem, rx_rings_rem; 549eff380aaSAnirudh Venkataramanan int v_id; 550eff380aaSAnirudh Venkataramanan 551eff380aaSAnirudh Venkataramanan /* initially assigning remaining rings count to VSIs num queue value */ 552eff380aaSAnirudh Venkataramanan tx_rings_rem = vsi->num_txq; 553eff380aaSAnirudh Venkataramanan rx_rings_rem = vsi->num_rxq; 554eff380aaSAnirudh Venkataramanan 555eff380aaSAnirudh Venkataramanan for (v_id = 0; v_id < q_vectors; v_id++) { 556eff380aaSAnirudh Venkataramanan struct ice_q_vector *q_vector = vsi->q_vectors[v_id]; 557eff380aaSAnirudh Venkataramanan int tx_rings_per_v, rx_rings_per_v, q_id, q_base; 558eff380aaSAnirudh Venkataramanan 559eff380aaSAnirudh Venkataramanan /* Tx rings mapping to vector */ 560eff380aaSAnirudh Venkataramanan 
tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); 561eff380aaSAnirudh Venkataramanan q_vector->num_ring_tx = tx_rings_per_v; 562eff380aaSAnirudh Venkataramanan q_vector->tx.ring = NULL; 563eff380aaSAnirudh Venkataramanan q_vector->tx.itr_idx = ICE_TX_ITR; 564eff380aaSAnirudh Venkataramanan q_base = vsi->num_txq - tx_rings_rem; 565eff380aaSAnirudh Venkataramanan 566eff380aaSAnirudh Venkataramanan for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { 567eff380aaSAnirudh Venkataramanan struct ice_ring *tx_ring = vsi->tx_rings[q_id]; 568eff380aaSAnirudh Venkataramanan 569eff380aaSAnirudh Venkataramanan tx_ring->q_vector = q_vector; 570eff380aaSAnirudh Venkataramanan tx_ring->next = q_vector->tx.ring; 571eff380aaSAnirudh Venkataramanan q_vector->tx.ring = tx_ring; 572eff380aaSAnirudh Venkataramanan } 573eff380aaSAnirudh Venkataramanan tx_rings_rem -= tx_rings_per_v; 574eff380aaSAnirudh Venkataramanan 575eff380aaSAnirudh Venkataramanan /* Rx rings mapping to vector */ 576eff380aaSAnirudh Venkataramanan rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); 577eff380aaSAnirudh Venkataramanan q_vector->num_ring_rx = rx_rings_per_v; 578eff380aaSAnirudh Venkataramanan q_vector->rx.ring = NULL; 579eff380aaSAnirudh Venkataramanan q_vector->rx.itr_idx = ICE_RX_ITR; 580eff380aaSAnirudh Venkataramanan q_base = vsi->num_rxq - rx_rings_rem; 581eff380aaSAnirudh Venkataramanan 582eff380aaSAnirudh Venkataramanan for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { 583eff380aaSAnirudh Venkataramanan struct ice_ring *rx_ring = vsi->rx_rings[q_id]; 584eff380aaSAnirudh Venkataramanan 585eff380aaSAnirudh Venkataramanan rx_ring->q_vector = q_vector; 586eff380aaSAnirudh Venkataramanan rx_ring->next = q_vector->rx.ring; 587eff380aaSAnirudh Venkataramanan q_vector->rx.ring = rx_ring; 588eff380aaSAnirudh Venkataramanan } 589eff380aaSAnirudh Venkataramanan rx_rings_rem -= rx_rings_per_v; 590eff380aaSAnirudh Venkataramanan } 591eff380aaSAnirudh 
Venkataramanan } 592eff380aaSAnirudh Venkataramanan 593eff380aaSAnirudh Venkataramanan /** 594eff380aaSAnirudh Venkataramanan * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors 595eff380aaSAnirudh Venkataramanan * @vsi: the VSI having memory freed 596eff380aaSAnirudh Venkataramanan */ 597eff380aaSAnirudh Venkataramanan void ice_vsi_free_q_vectors(struct ice_vsi *vsi) 598eff380aaSAnirudh Venkataramanan { 599eff380aaSAnirudh Venkataramanan int v_idx; 600eff380aaSAnirudh Venkataramanan 601eff380aaSAnirudh Venkataramanan ice_for_each_q_vector(vsi, v_idx) 602eff380aaSAnirudh Venkataramanan ice_free_q_vector(vsi, v_idx); 603eff380aaSAnirudh Venkataramanan } 604eff380aaSAnirudh Venkataramanan 605eff380aaSAnirudh Venkataramanan /** 606eff380aaSAnirudh Venkataramanan * ice_vsi_cfg_txq - Configure single Tx queue 607eff380aaSAnirudh Venkataramanan * @vsi: the VSI that queue belongs to 608eff380aaSAnirudh Venkataramanan * @ring: Tx ring to be configured 609eff380aaSAnirudh Venkataramanan * @qg_buf: queue group buffer 610eff380aaSAnirudh Venkataramanan */ 611eff380aaSAnirudh Venkataramanan int 612e75d1b2cSMaciej Fijalkowski ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, 613e75d1b2cSMaciej Fijalkowski struct ice_aqc_add_tx_qgrp *qg_buf) 614eff380aaSAnirudh Venkataramanan { 615eff380aaSAnirudh Venkataramanan struct ice_tlan_ctx tlan_ctx = { 0 }; 616eff380aaSAnirudh Venkataramanan struct ice_aqc_add_txqs_perq *txq; 617eff380aaSAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 618eff380aaSAnirudh Venkataramanan u8 buf_len = sizeof(*qg_buf); 619eff380aaSAnirudh Venkataramanan enum ice_status status; 620eff380aaSAnirudh Venkataramanan u16 pf_q; 621e75d1b2cSMaciej Fijalkowski u8 tc; 622eff380aaSAnirudh Venkataramanan 623eff380aaSAnirudh Venkataramanan pf_q = ring->reg_idx; 624eff380aaSAnirudh Venkataramanan ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); 625eff380aaSAnirudh Venkataramanan /* copy context contents into the qg_buf */ 626eff380aaSAnirudh 
Venkataramanan qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); 627eff380aaSAnirudh Venkataramanan ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, 628eff380aaSAnirudh Venkataramanan ice_tlan_ctx_info); 629eff380aaSAnirudh Venkataramanan 630eff380aaSAnirudh Venkataramanan /* init queue specific tail reg. It is referred as 631eff380aaSAnirudh Venkataramanan * transmit comm scheduler queue doorbell. 632eff380aaSAnirudh Venkataramanan */ 633eff380aaSAnirudh Venkataramanan ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); 634eff380aaSAnirudh Venkataramanan 635e75d1b2cSMaciej Fijalkowski if (IS_ENABLED(CONFIG_DCB)) 636e75d1b2cSMaciej Fijalkowski tc = ring->dcb_tc; 637e75d1b2cSMaciej Fijalkowski else 638e75d1b2cSMaciej Fijalkowski tc = 0; 639e75d1b2cSMaciej Fijalkowski 640eff380aaSAnirudh Venkataramanan /* Add unique software queue handle of the Tx queue per 641eff380aaSAnirudh Venkataramanan * TC into the VSI Tx ring 642eff380aaSAnirudh Venkataramanan */ 643e75d1b2cSMaciej Fijalkowski ring->q_handle = ice_calc_q_handle(vsi, ring, tc); 644eff380aaSAnirudh Venkataramanan 645eff380aaSAnirudh Venkataramanan status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle, 646eff380aaSAnirudh Venkataramanan 1, qg_buf, buf_len, NULL); 647eff380aaSAnirudh Venkataramanan if (status) { 6484015d11eSBrett Creeley dev_err(ice_pf_to_dev(pf), 649eff380aaSAnirudh Venkataramanan "Failed to set LAN Tx queue context, error: %d\n", 650eff380aaSAnirudh Venkataramanan status); 651eff380aaSAnirudh Venkataramanan return -ENODEV; 652eff380aaSAnirudh Venkataramanan } 653eff380aaSAnirudh Venkataramanan 654eff380aaSAnirudh Venkataramanan /* Add Tx Queue TEID into the VSI Tx ring from the 655eff380aaSAnirudh Venkataramanan * response. This will complete configuring and 656eff380aaSAnirudh Venkataramanan * enabling the queue. 
657eff380aaSAnirudh Venkataramanan */ 658eff380aaSAnirudh Venkataramanan txq = &qg_buf->txqs[0]; 659eff380aaSAnirudh Venkataramanan if (pf_q == le16_to_cpu(txq->txq_id)) 660eff380aaSAnirudh Venkataramanan ring->txq_teid = le32_to_cpu(txq->q_teid); 661eff380aaSAnirudh Venkataramanan 662eff380aaSAnirudh Venkataramanan return 0; 663eff380aaSAnirudh Venkataramanan } 664eff380aaSAnirudh Venkataramanan 665eff380aaSAnirudh Venkataramanan /** 666eff380aaSAnirudh Venkataramanan * ice_cfg_itr - configure the initial interrupt throttle values 667eff380aaSAnirudh Venkataramanan * @hw: pointer to the HW structure 668eff380aaSAnirudh Venkataramanan * @q_vector: interrupt vector that's being configured 669eff380aaSAnirudh Venkataramanan * 670eff380aaSAnirudh Venkataramanan * Configure interrupt throttling values for the ring containers that are 671eff380aaSAnirudh Venkataramanan * associated with the interrupt vector passed in. 672eff380aaSAnirudh Venkataramanan */ 673eff380aaSAnirudh Venkataramanan void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector) 674eff380aaSAnirudh Venkataramanan { 675eff380aaSAnirudh Venkataramanan ice_cfg_itr_gran(hw); 676eff380aaSAnirudh Venkataramanan 677eff380aaSAnirudh Venkataramanan if (q_vector->num_ring_rx) { 678eff380aaSAnirudh Venkataramanan struct ice_ring_container *rc = &q_vector->rx; 679eff380aaSAnirudh Venkataramanan 680eff380aaSAnirudh Venkataramanan rc->target_itr = ITR_TO_REG(rc->itr_setting); 681eff380aaSAnirudh Venkataramanan rc->next_update = jiffies + 1; 682eff380aaSAnirudh Venkataramanan rc->current_itr = rc->target_itr; 683eff380aaSAnirudh Venkataramanan wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), 684eff380aaSAnirudh Venkataramanan ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); 685eff380aaSAnirudh Venkataramanan } 686eff380aaSAnirudh Venkataramanan 687eff380aaSAnirudh Venkataramanan if (q_vector->num_ring_tx) { 688eff380aaSAnirudh Venkataramanan struct ice_ring_container *rc = &q_vector->tx; 
689eff380aaSAnirudh Venkataramanan 690eff380aaSAnirudh Venkataramanan rc->target_itr = ITR_TO_REG(rc->itr_setting); 691eff380aaSAnirudh Venkataramanan rc->next_update = jiffies + 1; 692eff380aaSAnirudh Venkataramanan rc->current_itr = rc->target_itr; 693eff380aaSAnirudh Venkataramanan wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), 694eff380aaSAnirudh Venkataramanan ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); 695eff380aaSAnirudh Venkataramanan } 696eff380aaSAnirudh Venkataramanan } 697eff380aaSAnirudh Venkataramanan 698eff380aaSAnirudh Venkataramanan /** 699eff380aaSAnirudh Venkataramanan * ice_cfg_txq_interrupt - configure interrupt on Tx queue 700eff380aaSAnirudh Venkataramanan * @vsi: the VSI being configured 701eff380aaSAnirudh Venkataramanan * @txq: Tx queue being mapped to MSI-X vector 702eff380aaSAnirudh Venkataramanan * @msix_idx: MSI-X vector index within the function 703eff380aaSAnirudh Venkataramanan * @itr_idx: ITR index of the interrupt cause 704eff380aaSAnirudh Venkataramanan * 705eff380aaSAnirudh Venkataramanan * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector 706eff380aaSAnirudh Venkataramanan * within the function space. 
 */
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* shift/mask the ITR index into its QINT_TQCTL field position */
	itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;

	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);

	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
	if (ice_is_xdp_ena_vsi(vsi)) {
		/* the paired XDP Tx queue shares this vector; its entry in
		 * txq_map sits num_xdp_txq slots after the regular queue
		 */
		u32 xdp_txq = txq + vsi->num_xdp_txq;

		wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
		     val);
	}
	ice_flush(hw);
}

/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @vsi: the VSI being configured
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* shift/mask the ITR index into its QINT_RQCTL field position */
	itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;

	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);

	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

	ice_flush(hw);
}

/**
 * ice_trigger_sw_intr - trigger a software interrupt
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector to trigger the software interrupt for
 */
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	/* SWINT_TRIG with INTENA set fires the vector immediately;
	 * ICE_ITR_NONE selects no throttling for this trigger
	 */
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_M);
}

/**
 * ice_vsi_stop_tx_ring - Disable single Tx ring
772eff380aaSAnirudh Venkataramanan * @vsi: the VSI being configured 773eff380aaSAnirudh Venkataramanan * @rst_src: reset source 774eff380aaSAnirudh Venkataramanan * @rel_vmvf_num: Relative ID of VF/VM 775eff380aaSAnirudh Venkataramanan * @ring: Tx ring to be stopped 776eff380aaSAnirudh Venkataramanan * @txq_meta: Meta data of Tx ring to be stopped 777eff380aaSAnirudh Venkataramanan */ 778eff380aaSAnirudh Venkataramanan int 779eff380aaSAnirudh Venkataramanan ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 780eff380aaSAnirudh Venkataramanan u16 rel_vmvf_num, struct ice_ring *ring, 781eff380aaSAnirudh Venkataramanan struct ice_txq_meta *txq_meta) 782eff380aaSAnirudh Venkataramanan { 783eff380aaSAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 784eff380aaSAnirudh Venkataramanan struct ice_q_vector *q_vector; 785eff380aaSAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 786eff380aaSAnirudh Venkataramanan enum ice_status status; 787eff380aaSAnirudh Venkataramanan u32 val; 788eff380aaSAnirudh Venkataramanan 789eff380aaSAnirudh Venkataramanan /* clear cause_ena bit for disabled queues */ 790eff380aaSAnirudh Venkataramanan val = rd32(hw, QINT_TQCTL(ring->reg_idx)); 791eff380aaSAnirudh Venkataramanan val &= ~QINT_TQCTL_CAUSE_ENA_M; 792eff380aaSAnirudh Venkataramanan wr32(hw, QINT_TQCTL(ring->reg_idx), val); 793eff380aaSAnirudh Venkataramanan 794eff380aaSAnirudh Venkataramanan /* software is expected to wait for 100 ns */ 795eff380aaSAnirudh Venkataramanan ndelay(100); 796eff380aaSAnirudh Venkataramanan 797eff380aaSAnirudh Venkataramanan /* trigger a software interrupt for the vector 798eff380aaSAnirudh Venkataramanan * associated to the queue to schedule NAPI handler 799eff380aaSAnirudh Venkataramanan */ 800eff380aaSAnirudh Venkataramanan q_vector = ring->q_vector; 801eff380aaSAnirudh Venkataramanan if (q_vector) 802eff380aaSAnirudh Venkataramanan ice_trigger_sw_intr(hw, q_vector); 803eff380aaSAnirudh Venkataramanan 804eff380aaSAnirudh 
Venkataramanan status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx, 805eff380aaSAnirudh Venkataramanan txq_meta->tc, 1, &txq_meta->q_handle, 806eff380aaSAnirudh Venkataramanan &txq_meta->q_id, &txq_meta->q_teid, rst_src, 807eff380aaSAnirudh Venkataramanan rel_vmvf_num, NULL); 808eff380aaSAnirudh Venkataramanan 809eff380aaSAnirudh Venkataramanan /* if the disable queue command was exercised during an 810eff380aaSAnirudh Venkataramanan * active reset flow, ICE_ERR_RESET_ONGOING is returned. 811eff380aaSAnirudh Venkataramanan * This is not an error as the reset operation disables 812eff380aaSAnirudh Venkataramanan * queues at the hardware level anyway. 813eff380aaSAnirudh Venkataramanan */ 814eff380aaSAnirudh Venkataramanan if (status == ICE_ERR_RESET_ONGOING) { 815eff380aaSAnirudh Venkataramanan dev_dbg(&vsi->back->pdev->dev, 816eff380aaSAnirudh Venkataramanan "Reset in progress. LAN Tx queues already disabled\n"); 817eff380aaSAnirudh Venkataramanan } else if (status == ICE_ERR_DOES_NOT_EXIST) { 818eff380aaSAnirudh Venkataramanan dev_dbg(&vsi->back->pdev->dev, 819eff380aaSAnirudh Venkataramanan "LAN Tx queues do not exist, nothing to disable\n"); 820eff380aaSAnirudh Venkataramanan } else if (status) { 821eff380aaSAnirudh Venkataramanan dev_err(&vsi->back->pdev->dev, 822eff380aaSAnirudh Venkataramanan "Failed to disable LAN Tx queues, error: %d\n", status); 823eff380aaSAnirudh Venkataramanan return -ENODEV; 824eff380aaSAnirudh Venkataramanan } 825eff380aaSAnirudh Venkataramanan 826eff380aaSAnirudh Venkataramanan return 0; 827eff380aaSAnirudh Venkataramanan } 828eff380aaSAnirudh Venkataramanan 829eff380aaSAnirudh Venkataramanan /** 830eff380aaSAnirudh Venkataramanan * ice_fill_txq_meta - Prepare the Tx queue's meta data 831eff380aaSAnirudh Venkataramanan * @vsi: VSI that ring belongs to 832eff380aaSAnirudh Venkataramanan * @ring: ring that txq_meta will be based on 833eff380aaSAnirudh Venkataramanan * @txq_meta: a helper struct that wraps Tx queue's information 
834eff380aaSAnirudh Venkataramanan * 835eff380aaSAnirudh Venkataramanan * Set up a helper struct that will contain all the necessary fields that 836eff380aaSAnirudh Venkataramanan * are needed for stopping Tx queue 837eff380aaSAnirudh Venkataramanan */ 838eff380aaSAnirudh Venkataramanan void 839eff380aaSAnirudh Venkataramanan ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, 840eff380aaSAnirudh Venkataramanan struct ice_txq_meta *txq_meta) 841eff380aaSAnirudh Venkataramanan { 842eff380aaSAnirudh Venkataramanan u8 tc; 843eff380aaSAnirudh Venkataramanan 844eff380aaSAnirudh Venkataramanan if (IS_ENABLED(CONFIG_DCB)) 845eff380aaSAnirudh Venkataramanan tc = ring->dcb_tc; 846eff380aaSAnirudh Venkataramanan else 847eff380aaSAnirudh Venkataramanan tc = 0; 848eff380aaSAnirudh Venkataramanan 849eff380aaSAnirudh Venkataramanan txq_meta->q_id = ring->reg_idx; 850eff380aaSAnirudh Venkataramanan txq_meta->q_teid = ring->txq_teid; 851eff380aaSAnirudh Venkataramanan txq_meta->q_handle = ring->q_handle; 852eff380aaSAnirudh Venkataramanan txq_meta->vsi_idx = vsi->idx; 853eff380aaSAnirudh Venkataramanan txq_meta->tc = tc; 854eff380aaSAnirudh Venkataramanan } 855