// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <net/xdp_sock_drv.h>
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
{
	rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
	return !!rx_ring->xdp_buf;
}

static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
{
	rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
	return !!rx_ring->rx_buf;
}

/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM in case of no space left in the PF queue
 * bitmap
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	unsigned int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}

/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM in case of no space left in the PF queue
 * bitmap
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	unsigned int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
	for (index = 0; index < i; index++) {
		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}
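/* A minimal walk-through of the two strategies above, using made-up values:
 * given an 8-bit pf_map in which bits 0 and 3 are already set and
 * q_count = 3, __ice_vsi_get_qs_contig() finds the first run of three clear
 * bits at offset 4 and records PF queues {4, 5, 6} in vsi_map, whereas the
 * scattered fallback in __ice_vsi_get_qs_sc() would claim PF queues 1, 2
 * and 4 individually.
 */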
/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector and set default values for the ITR settings
 * associated with this q_vector. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
				GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
	q_vector->tx.itr_mode = ITR_DYNAMIC;
	q_vector->rx.itr_mode = ITR_DYNAMIC;
	q_vector->tx.type = ICE_TX_CONTAINER;
	q_vector->rx.type = ICE_RX_CONTAINER;

	if (vsi->type == ICE_VSI_VF)
		goto out;
	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* This will not be called in the driver load path because the netdev
	 * will not be created yet. All other cases will register the NAPI
	 * handler here (i.e. resume, reset/rebuild, etc.)
	 */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		tx_ring->q_vector = NULL;
	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		rx_ring->q_vector = NULL;

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	devm_kfree(dev, q_vector);
	vsi->q_vectors[v_idx] = NULL;
}

/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * @hw: board specific structure
 */
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
	u32 regval = rd32(hw, GLINT_CTL);

	/* no need to update global register if ITR gran is already set */
	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
	    (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
	      GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
	      GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
	      GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
	      GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
		return;

	regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
		  GLINT_CTL_ITR_GRAN_200_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
		  GLINT_CTL_ITR_GRAN_100_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
		  GLINT_CTL_ITR_GRAN_50_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
		  GLINT_CTL_ITR_GRAN_25_M);
	wr32(hw, GLINT_CTL, regval);
}

/**
 * ice_calc_txq_handle - calculate the queue handle
 * @vsi: VSI that ring belongs to
 * @ring: ring to get the absolute queue index for
 * @tc: traffic class number
 */
static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
	WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");

	if (ring->ch)
		return ring->q_index - ring->ch->base_q;

	/* The idea here is to subtract the queue offset of the TC that the
	 * ring belongs to from the ring's absolute queue index, which yields
	 * the queue's index within that TC.
	 */
	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
}
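/* Worked example for ice_calc_txq_handle() with made-up numbers: if TC 2 of
 * the VSI starts at tc_cfg.tc_info[2].qoffset = 16 and the ring's absolute
 * index is q_index = 18, the per-TC handle is 18 - 16 = 2. For a channel
 * (ADQ) ring the same arithmetic is done against ring->ch->base_q instead.
 */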
/**
 * ice_eswitch_calc_txq_handle - calculate a unique Tx queue handle for switchdev
 * @ring: pointer to the ring for which a unique queue index is needed
 *
 * To correctly work with many netdevs, ring->q_index of Tx rings on a
 * switchdev VSI can repeat. Hardware ring setup requires a unique q_index.
 * Calculate it here by finding the index of this ring in vsi->tx_rings.
 *
 * Return ICE_INVAL_Q_INDEX when the index isn't found. This should never
 * happen, because the VSI is taken from ring->vsi, so the ring has to be
 * present in this VSI.
 */
static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	int i;

	ice_for_each_txq(vsi, i) {
		if (vsi->tx_rings[i] == ring)
			return i;
	}

	return ICE_INVAL_Q_INDEX;
}

/**
 * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 */
static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
{
	if (!ring->q_vector || !ring->netdev)
		return;

	/* We only initialize XPS once, so as not to overwrite user settings */
	if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
		return;

	netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
			    ring->q_index);
}
/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring->dcb_tc);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is VF number between 0-256
	 * for vmvf_type = VM, it is VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		if (ring->ch)
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		else
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	if (ring->ch)
		tlan_ctx->src_vsi = ring->ch->vsi_num;
	else
		tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	/* Restrict Tx timestamps to the PF VSI */
	switch (vsi->type) {
	case ICE_VSI_PF:
		tlan_ctx->tsyn_ena = 1;
		break;
	default:
		break;
	}

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}

/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		return XDP_PACKET_HEADROOM;

	return 0;
}
/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
{
	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
	struct ice_vsi *vsi = ring->vsi;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	struct ice_hw *hw;
	u16 pf_q;
	int err;

	hw = &vsi->back->hw;

	/* what is Rx queue number in global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

	/* Receive Queue Base Address.
	 * Indicates the starting address of the descriptor queue defined in
	 * 128 Byte units.
	 */
	rlan_ctx.base = ring->dma >> 7;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = 1;

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
	rlan_ctx.l2tsel = 1;

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers
	 * The VLAN in the inner L2 header is stripped to the receive
	 * descriptor if enabled by this flag.
	 */
	rlan_ctx.showiv = 0;

	/* For AF_XDP ZC, we disallow packets that span multiple buffers,
	 * thus letting us skip that handling in the fast-path.
	 */
	if (ring->xsk_pool)
		chain_len = 1;
	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
			       chain_len * ring->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context which
	 * allows this driver to select a specific receive descriptor format,
	 * increasing context priority to pick up profile ID; default is 0x01;
	 * setting to 0x03 to ensure the profile is programmed if the previous
	 * context is of the same priority
	 */
	if (vsi->type != ICE_VSI_VF)
		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
	else
		ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
					false);

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* configure Rx buffer alignment */
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		ice_clear_ring_build_skb_ena(ring);
	else
		ice_set_ring_build_skb_ena(ring);

	ring->rx_offset = ice_rx_offset(ring);

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);

	return 0;
}
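/* Worked example for the rxmax calculation above, with illustrative values:
 * with 2048-byte Rx buffers and the default chain length of
 * ICE_MAX_CHAINED_RX_BUFS (5 buffers, per the "5 x DBUF" comment), the
 * chained limit is 5 * 2048 = 10240 bytes, so a VSI with max_frame = 9216
 * programs rxmax = 9216; an AF_XDP zero-copy queue (chain_len = 1) is
 * instead capped at a single buffer, 2048 bytes.
 */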
/**
 * ice_vsi_cfg_rxq - Configure an Rx queue
 * @ring: the ring being configured
 *
 * Return 0 on success and a negative value on error.
 */
int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
	struct device *dev = ice_pf_to_dev(ring->vsi->back);
	u16 num_bufs = ICE_DESC_UNUSED(ring);
	int err;

	ring->rx_buf_len = ring->vsi->rx_buf_len;

	if (ring->vsi->type == ICE_VSI_PF) {
		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
			/* coverity[check_return] */
			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
					 ring->q_index, ring->q_vector->napi.napi_id);

		kfree(ring->rx_buf);
		ring->xsk_pool = ice_xsk_pool(ring);
		if (ring->xsk_pool) {
			if (!ice_alloc_rx_buf_zc(ring))
				return -ENOMEM;
			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

			ring->rx_buf_len =
				xsk_pool_get_rx_frame_size(ring->xsk_pool);
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_XSK_BUFF_POOL,
							 NULL);
			if (err)
				return err;
			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);

			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
				 ring->q_index);
		} else {
			if (!ice_alloc_rx_buf(ring))
				return -ENOMEM;
			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
				/* coverity[check_return] */
				xdp_rxq_info_reg(&ring->xdp_rxq,
						 ring->netdev,
						 ring->q_index, ring->q_vector->napi.napi_id);

			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err)
				return err;
		}
	}

	err = ice_setup_rx_ctx(ring);
	if (err) {
		dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
			ring->q_index, err);
		return err;
	}

	if (ring->xsk_pool) {
		bool ok;

		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
				 num_bufs, ring->q_index);
			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");

			return 0;
		}

		ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
		if (!ok) {
			u16 pf_q = ring->vsi->rxq_map[ring->q_index];

			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
				 ring->q_index, pf_q);
		}

		return 0;
	}

	ice_alloc_rx_bufs(ring, num_bufs);

	return 0;
}
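/* Note on the AF_XDP path in ice_vsi_cfg_rxq() above: the page-based rx_buf
 * array and the zero-copy xdp_buf array are only ever used one at a time, so
 * the rx_buf array is freed before whichever array matches the current mode
 * is allocated, and the xdp_rxq memory model is (re)registered accordingly:
 * MEM_TYPE_XSK_BUFF_POOL when an XSK pool is attached, MEM_TYPE_PAGE_SHARED
 * otherwise.
 */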
/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * This function first tries to find contiguous space. If it is not successful,
 * it tries with the scatter approach.
 *
 * Return 0 on success and -ENOMEM in case of no space left in the PF queue
 * bitmap
 */
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
	int ret = 0;

	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contig failed, so try with scatter approach */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
	return ret;
}

/**
 * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx ring
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 * @wait: wait or don't wait for configuration to finish in hardware
 *
 * Return 0 on success and negative on error.
 */
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 rx_reg;

	rx_reg = rd32(hw, QRX_CTRL(pf_q));

	/* Skip if the queue is already in the requested state */
	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
		return 0;

	/* turn on/off the queue */
	if (ena)
		rx_reg |= QRX_CTRL_QENA_REQ_M;
	else
		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
	wr32(hw, QRX_CTRL(pf_q), rx_reg);

	if (!wait)
		return 0;

	ice_flush(hw);
	return ice_pf_rxq_wait(pf, pf_q, ena);
}

/**
 * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
 * @vsi: the VSI being configured
 * @ena: true/false to verify Rx ring has been enabled/disabled respectively
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 *
 * This routine will wait for the given Rx queue of the VSI to reach the
 * enabled or disabled state. Returns -ETIMEDOUT in case of failing to reach
 * the requested state after multiple retries; else will return 0 in case of
 * success.
 */
int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;

	return ice_pf_rxq_wait(pf, pf_q, ena);
}

/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	u16 v_idx;
	int err;

	if (vsi->q_vectors[0]) {
		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
		return -EEXIST;
	}

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
}
/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
	int q_vectors = vsi->num_q_vectors;
	u16 tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initially assign the remaining ring counts to the VSI's queue counts */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		u8 tx_rings_per_v, rx_rings_per_v;
		u16 q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.tx_ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.tx_ring;
			q_vector->tx.tx_ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.rx_ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.rx_ring;
			q_vector->rx.rx_ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}

/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
	int v_idx;

	ice_for_each_q_vector(vsi, v_idx)
		ice_free_q_vector(vsi, v_idx);
}
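/* Worked example for ice_vsi_map_rings_to_vectors() with illustrative
 * numbers: a VSI with num_txq = 10 and num_q_vectors = 4 spreads Tx rings as
 * DIV_ROUND_UP(10, 4) = 3, DIV_ROUND_UP(7, 3) = 3, DIV_ROUND_UP(4, 2) = 2 and
 * DIV_ROUND_UP(2, 1) = 2, i.e. 3/3/2/2 rings on vectors 0-3; Rx rings are
 * distributed the same way from num_rxq.
 */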
/**
 * ice_vsi_cfg_txq - Configure single Tx queue
 * @vsi: the VSI that queue belongs to
 * @ring: Tx ring to be configured
 * @qg_buf: queue group buffer
 */
int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
		struct ice_aqc_add_tx_qgrp *qg_buf)
{
	u8 buf_len = struct_size(qg_buf, txqs, 1);
	struct ice_tlan_ctx tlan_ctx = { 0 };
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_channel *ch = ring->ch;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int status;
	u16 pf_q;
	u8 tc;

	/* Configure XPS */
	ice_cfg_xps_tx_ring(ring);

	pf_q = ring->reg_idx;
	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
	/* copy context contents into the qg_buf */
	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
	ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	/* init queue specific tail reg. It is referred to as the
	 * transmit comm scheduler queue doorbell.
	 */
	ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	/* Add unique software queue handle of the Tx queue per
	 * TC into the VSI Tx ring
	 */
	if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
		ring->q_handle = ice_eswitch_calc_txq_handle(ring);

		if (ring->q_handle == ICE_INVAL_Q_INDEX)
			return -ENODEV;
	} else {
		ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
	}

	if (ch)
		status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
					 ring->q_handle, 1, qg_buf, buf_len,
					 NULL);
	else
		status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
					 ring->q_handle, 1, qg_buf, buf_len,
					 NULL);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
			status);
		return status;
	}

	/* Add Tx Queue TEID into the VSI Tx ring from the
	 * response. This will complete configuring and
	 * enabling the queue.
	 */
	txq = &qg_buf->txqs[0];
	if (pf_q == le16_to_cpu(txq->txq_id))
		ring->txq_teid = le32_to_cpu(txq->q_teid);

	return 0;
}

/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in.
 */
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	ice_cfg_itr_gran(hw);

	if (q_vector->num_ring_rx)
		ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);

	if (q_vector->num_ring_tx)
		ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);

	ice_write_intrl(q_vector, q_vector->intrl);
}
/**
 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
 * @vsi: the VSI being configured
 * @txq: Tx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;

	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);

	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
	if (ice_is_xdp_ena_vsi(vsi)) {
		u32 xdp_txq = txq + vsi->num_xdp_txq;

		wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
		     val);
	}
	ice_flush(hw);
}

/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @vsi: the VSI being configured
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;

	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);

	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

	ice_flush(hw);
}

/**
 * ice_trigger_sw_intr - trigger a software interrupt
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector to trigger the software interrupt for
 */
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_M);
}
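/* The QINT_TQCTL/QINT_RQCTL programming above builds one 32-bit value per
 * queue: the cause-enable bit, the ITR index selecting which throttling rate
 * the queue uses, and the MSI-X vector the queue reports to, each shifted
 * into its field and masked. For example (illustrative values only),
 * msix_idx = 5 with itr_idx = 1 yields
 * CAUSE_ENA | (1 << ITR_INDX_S) | (5 << MSIX_INDX_S).
 */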
/**
 * ice_vsi_stop_tx_ring - Disable single Tx ring
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @ring: Tx ring to be stopped
 * @txq_meta: Meta data of Tx ring to be stopped
 */
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		     u16 rel_vmvf_num, struct ice_tx_ring *ring,
		     struct ice_txq_meta *txq_meta)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	struct ice_hw *hw = &pf->hw;
	int status;
	u32 val;

	/* clear cause_ena bit for disabled queues */
	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
	val &= ~QINT_TQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_TQCTL(ring->reg_idx), val);

	/* software is expected to wait for 100 ns */
	ndelay(100);

	/* trigger a software interrupt for the vector
	 * associated to the queue to schedule NAPI handler
	 */
	q_vector = ring->q_vector;
	if (q_vector)
		ice_trigger_sw_intr(hw, q_vector);

	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
				 txq_meta->tc, 1, &txq_meta->q_handle,
				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
				 rel_vmvf_num, NULL);

	/* if the disable queue command was exercised during an
	 * active reset flow, -EBUSY is returned.
	 * This is not an error as the reset operation disables
	 * queues at the hardware level anyway.
	 */
	if (status == -EBUSY) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
	} else if (status == -ENOENT) {
		dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
	} else if (status) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
			status);
		return status;
	}

	return 0;
}

/**
 * ice_fill_txq_meta - Prepare the Tx queue's meta data
 * @vsi: VSI that ring belongs to
 * @ring: ring that txq_meta will be based on
 * @txq_meta: a helper struct that wraps Tx queue's information
 *
 * Set up a helper struct that will contain all the necessary fields that
 * are needed for stopping the Tx queue.
 */
void
ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
		  struct ice_txq_meta *txq_meta)
{
	struct ice_channel *ch = ring->ch;
	u8 tc;

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	txq_meta->q_id = ring->reg_idx;
	txq_meta->q_teid = ring->txq_teid;
	txq_meta->q_handle = ring->q_handle;
	if (ch) {
		txq_meta->vsi_idx = ch->ch_vsi->idx;
		txq_meta->tc = 0;
	} else {
		txq_meta->vsi_idx = vsi->idx;
		txq_meta->tc = tc;
	}
}