// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
static int ice_setup_rx_ctx(struct ice_ring *ring)
{
        struct ice_vsi *vsi = ring->vsi;
        struct ice_hw *hw = &vsi->back->hw;
        u32 rxdid = ICE_RXDID_FLEX_NIC;
        struct ice_rlan_ctx rlan_ctx;
        u32 regval;
        u16 pf_q;
        int err;

        /* what is Rx queue number in global space of 2K Rx queues */
        pf_q = vsi->rxq_map[ring->q_index];

        /* clear the context structure first */
        memset(&rlan_ctx, 0, sizeof(rlan_ctx));

        rlan_ctx.base = ring->dma >> 7;

        rlan_ctx.qlen = ring->count;

        /* Receive Packet Data Buffer Size.
         * The Packet Data Buffer Size is defined in 128 byte units.
         */
        rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

        /* use 32 byte descriptors */
        rlan_ctx.dsize = 1;

        /* Strip the Ethernet CRC bytes before the packet is posted to host
         * memory.
         */
        rlan_ctx.crcstrip = 1;

        /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
        rlan_ctx.l2tsel = 1;

        rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
        rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
        rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

        /* This controls whether VLAN is stripped from inner headers.
         * The VLAN in the inner L2 header is stripped to the receive
         * descriptor if enabled by this flag.
         */
        rlan_ctx.showiv = 0;

        /* Max packet size for this queue - must not be set to a larger value
         * than 5 x DBUF
         */
        rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
                               ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);

        /* Rx queue threshold in units of 64 */
        rlan_ctx.lrxqthresh = 1;

        /* Enable Flexible Descriptors in the queue context which
         * allows this driver to select a specific receive descriptor format
         */
        if (vsi->type != ICE_VSI_VF) {
                regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
                regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
                        QRXFLXP_CNTXT_RXDID_IDX_M;

                /* increasing context priority to pick up profile ID;
                 * default is 0x01; setting to 0x03 to ensure the profile
                 * is programmed even when the previous context has the same
                 * priority
                 */
                regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
                        QRXFLXP_CNTXT_RXDID_PRIO_M;

                wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
        }

        /* Absolute queue number out of 2K needs to be passed */
        err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
        if (err) {
                dev_err(&vsi->back->pdev->dev,
                        "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
                        pf_q, err);
                return -EIO;
        }

        if (vsi->type == ICE_VSI_VF)
                return 0;

        /* init queue specific tail register */
        ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
        writel(0, ring->tail);
        ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));

        return 0;
}

/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
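 *
 * Editorial note (not from the original source): as with the Rx ring base
 * above, the Tx ring base address is programmed in 128 byte units, i.e.
 * tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S, assuming
 * ICE_TLAN_CTX_BASE_S is 7 and so mirrors the Rx shift.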
 */
static void
ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
        struct ice_vsi *vsi = ring->vsi;
        struct ice_hw *hw = &vsi->back->hw;

        tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

        tlan_ctx->port_num = vsi->port_info->lport;

        /* Transmit Queue Length */
        tlan_ctx->qlen = ring->count;

        ice_set_cgd_num(tlan_ctx, ring);

        /* PF number */
        tlan_ctx->pf_num = hw->pf_id;

        /* queue belongs to a specific VSI type
         * VF / VM index should be programmed per vmvf_type setting:
         * for vmvf_type = VF, it is VF number between 0-256
         * for vmvf_type = VM, it is VM number between 0-767
         * for PF or EMP this field should be set to zero
         */
        switch (vsi->type) {
        case ICE_VSI_LB:
                /* fall through */
        case ICE_VSI_PF:
                tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
                break;
        case ICE_VSI_VF:
                /* Firmware expects vmvf_num to be absolute VF ID */
                tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
                tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
                break;
        default:
                return;
        }

        /* make sure the context is associated with the right VSI */
        tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

        tlan_ctx->tso_ena = ICE_TX_LEGACY;
        tlan_ctx->tso_qnum = pf_q;

        /* Legacy or Advanced Host Interface:
         * 0: Advanced Host Interface
         * 1: Legacy Host Interface
         */
        tlan_ctx->legacy_int = ICE_TX_LEGACY;
}

/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns 0 on success, or -ETIMEDOUT if the queue fails to reach the
 * requested state after multiple retries.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
        int i;

        for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
                if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
                              QRX_CTRL_QENA_STAT_M))
                        return 0;

                usleep_range(20, 40);
        }

        return -ETIMEDOUT;
}

/**
 * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 * @rxq_idx: Rx queue index
 */
#ifndef CONFIG_PCI_IOV
static
#endif /* !CONFIG_PCI_IOV */
int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
        int pf_q = vsi->rxq_map[rxq_idx];
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        int ret = 0;
        u32 rx_reg;

        rx_reg = rd32(hw, QRX_CTRL(pf_q));

        /* Skip if the queue is already in the requested state */
        if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
                return 0;

        /* turn on/off the queue */
        if (ena)
                rx_reg |= QRX_CTRL_QENA_REQ_M;
        else
                rx_reg &= ~QRX_CTRL_QENA_REQ_M;
        wr32(hw, QRX_CTRL(pf_q), rx_reg);

        /* wait for the change to finish */
        ret = ice_pf_rxq_wait(pf, pf_q, ena);
        if (ret)
                dev_err(&pf->pdev->dev,
                        "VSI idx %d Rx ring %d %sable timeout\n",
                        vsi->idx, pf_q, (ena ?
"en" : "dis")); 229 230 return ret; 231 } 232 233 /** 234 * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings 235 * @vsi: the VSI being configured 236 * @ena: start or stop the Rx rings 237 */ 238 static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) 239 { 240 int i, ret = 0; 241 242 for (i = 0; i < vsi->num_rxq; i++) { 243 ret = ice_vsi_ctrl_rx_ring(vsi, ena, i); 244 if (ret) 245 break; 246 } 247 248 return ret; 249 } 250 251 /** 252 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI 253 * @vsi: VSI pointer 254 * 255 * On error: returns error code (negative) 256 * On success: returns 0 257 */ 258 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) 259 { 260 struct ice_pf *pf = vsi->back; 261 262 /* allocate memory for both Tx and Rx ring pointers */ 263 vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, 264 sizeof(*vsi->tx_rings), GFP_KERNEL); 265 if (!vsi->tx_rings) 266 return -ENOMEM; 267 268 vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, 269 sizeof(*vsi->rx_rings), GFP_KERNEL); 270 if (!vsi->rx_rings) 271 goto err_rings; 272 273 vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, 274 sizeof(*vsi->txq_map), GFP_KERNEL); 275 276 if (!vsi->txq_map) 277 goto err_txq_map; 278 279 vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, 280 sizeof(*vsi->rxq_map), GFP_KERNEL); 281 if (!vsi->rxq_map) 282 goto err_rxq_map; 283 284 285 /* There is no need to allocate q_vectors for a loopback VSI. */ 286 if (vsi->type == ICE_VSI_LB) 287 return 0; 288 289 /* allocate memory for q_vector pointers */ 290 vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors, 291 sizeof(*vsi->q_vectors), GFP_KERNEL); 292 if (!vsi->q_vectors) 293 goto err_vectors; 294 295 return 0; 296 297 err_vectors: 298 devm_kfree(&pf->pdev->dev, vsi->rxq_map); 299 err_rxq_map: 300 devm_kfree(&pf->pdev->dev, vsi->txq_map); 301 err_txq_map: 302 devm_kfree(&pf->pdev->dev, vsi->rx_rings); 303 err_rings: 304 devm_kfree(&pf->pdev->dev, vsi->tx_rings); 305 return -ENOMEM; 306 } 307 308 /** 309 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI 310 * @vsi: the VSI being configured 311 */ 312 static void ice_vsi_set_num_desc(struct ice_vsi *vsi) 313 { 314 switch (vsi->type) { 315 case ICE_VSI_PF: 316 /* fall through */ 317 case ICE_VSI_LB: 318 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; 319 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; 320 break; 321 default: 322 dev_dbg(&vsi->back->pdev->dev, 323 "Not setting number of Tx/Rx descriptors for VSI type %d\n", 324 vsi->type); 325 break; 326 } 327 } 328 329 /** 330 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI 331 * @vsi: the VSI being configured 332 * @vf_id: ID of the VF being configured 333 * 334 * Return 0 on success and a negative value on error 335 */ 336 static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) 337 { 338 struct ice_pf *pf = vsi->back; 339 struct ice_vf *vf = NULL; 340 341 if (vsi->type == ICE_VSI_VF) 342 vsi->vf_id = vf_id; 343 344 switch (vsi->type) { 345 case ICE_VSI_PF: 346 vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf), 347 num_online_cpus()); 348 349 pf->num_lan_tx = vsi->alloc_txq; 350 351 /* only 1 Rx queue unless RSS is enabled */ 352 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 353 vsi->alloc_rxq = 1; 354 else 355 vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf), 356 num_online_cpus()); 357 358 pf->num_lan_rx = vsi->alloc_rxq; 359 360 vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq); 361 
                break;
        case ICE_VSI_VF:
                vf = &pf->vf[vsi->vf_id];
                vsi->alloc_txq = vf->num_vf_qs;
                vsi->alloc_rxq = vf->num_vf_qs;
                /* pf->num_vf_msix includes (VF miscellaneous vector +
                 * data queue interrupts). Since vsi->num_q_vectors is the
                 * number of queue vectors, subtract 1 (ICE_NONQ_VECS_VF)
                 * from the original vector count
                 */
                vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF;
                break;
        case ICE_VSI_LB:
                vsi->alloc_txq = 1;
                vsi->alloc_rxq = 1;
                break;
        default:
                dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
                break;
        }

        ice_vsi_set_num_desc(vsi);
}

/**
 * ice_get_free_slot - get the next free (NULL) location index in array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use this
 * function on any array of pointers.
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
        int **tmp_array = (int **)array;
        int next;

        if (curr < (size - 1) && !tmp_array[curr + 1]) {
                next = curr + 1;
        } else {
                int i = 0;

                while ((i < size) && (tmp_array[i]))
                        i++;
                if (i == size)
                        next = ICE_NO_VSI;
                else
                        next = i;
        }
        return next;
}

/**
 * ice_vsi_delete - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
void ice_vsi_delete(struct ice_vsi *vsi)
{
        struct ice_pf *pf = vsi->back;
        struct ice_vsi_ctx *ctxt;
        enum ice_status status;

        ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
        if (!ctxt)
                return;

        if (vsi->type == ICE_VSI_VF)
                ctxt->vf_num = vsi->vf_id;
        ctxt->vsi_num = vsi->vsi_num;

        memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));

        status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
        if (status)
                dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
                        vsi->vsi_num);

        devm_kfree(&pf->pdev->dev, ctxt);
}

/**
 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
 * @vsi: pointer to VSI being cleared
 */
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
        struct ice_pf *pf = vsi->back;

        /* free the ring and vector containers */
        if (vsi->q_vectors) {
                devm_kfree(&pf->pdev->dev, vsi->q_vectors);
                vsi->q_vectors = NULL;
        }
        if (vsi->tx_rings) {
                devm_kfree(&pf->pdev->dev, vsi->tx_rings);
                vsi->tx_rings = NULL;
        }
        if (vsi->rx_rings) {
                devm_kfree(&pf->pdev->dev, vsi->rx_rings);
                vsi->rx_rings = NULL;
        }
        if (vsi->txq_map) {
                devm_kfree(&pf->pdev->dev, vsi->txq_map);
                vsi->txq_map = NULL;
        }
        if (vsi->rxq_map) {
                devm_kfree(&pf->pdev->dev, vsi->rxq_map);
                vsi->rxq_map = NULL;
        }
}

/**
 * ice_vsi_clear - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
 *
 * Returns 0 on success, negative on failure
 */
int ice_vsi_clear(struct ice_vsi *vsi)
{
        struct ice_pf *pf = NULL;

        if (!vsi)
                return 0;

        if (!vsi->back)
                return -EINVAL;

        pf = vsi->back;

        if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
                dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
                        vsi->idx);
                return -EINVAL;
        }

        mutex_lock(&pf->sw_mutex);
        /* updates the PF for this cleared VSI */

        pf->vsi[vsi->idx] = NULL;
        if (vsi->idx < pf->next_vsi)
                pf->next_vsi = vsi->idx;

        ice_vsi_free_arrays(vsi);
        mutex_unlock(&pf->sw_mutex);
        devm_kfree(&pf->pdev->dev, vsi);

        return 0;
}

/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
        struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

        if (!q_vector->tx.ring && !q_vector->rx.ring)
                return IRQ_HANDLED;

        napi_schedule(&q_vector->napi);

        return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 * @type: type of VSI
 * @vf_id: ID of the VF being configured
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
{
        struct ice_vsi *vsi = NULL;

        /* Need to protect the allocation of the VSIs at the PF level */
        mutex_lock(&pf->sw_mutex);

        /* If we have already allocated our maximum number of VSIs,
         * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
         * is available to be populated
         */
        if (pf->next_vsi == ICE_NO_VSI) {
                dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
                goto unlock_pf;
        }

        vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
        if (!vsi)
                goto unlock_pf;

        vsi->type = type;
        vsi->back = pf;
        set_bit(__ICE_DOWN, vsi->state);

        vsi->idx = pf->next_vsi;

        if (type == ICE_VSI_VF)
                ice_vsi_set_num_qs(vsi, vf_id);
        else
                ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

        switch (vsi->type) {
        case ICE_VSI_PF:
                if (ice_vsi_alloc_arrays(vsi))
                        goto err_rings;

                /* Setup default MSIX irq handler for VSI */
                vsi->irq_handler = ice_msix_clean_rings;
                break;
        case ICE_VSI_VF:
                if (ice_vsi_alloc_arrays(vsi))
                        goto err_rings;
                break;
        case ICE_VSI_LB:
                if (ice_vsi_alloc_arrays(vsi))
                        goto err_rings;
                break;
        default:
                dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
                goto unlock_pf;
        }

        /* fill VSI slot in the PF struct */
        pf->vsi[pf->next_vsi] = vsi;

        /* prepare pf->next_vsi for next use */
        pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
                                         pf->next_vsi);
        goto unlock_pf;

err_rings:
        devm_kfree(&pf->pdev->dev, vsi);
        vsi = NULL;
unlock_pf:
        mutex_unlock(&pf->sw_mutex);
        return vsi;
}

/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM in case there is no space left in the PF
 * queue bitmap
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
        int offset, i;

        mutex_lock(qs_cfg->qs_mutex);
        offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
                                            0, qs_cfg->q_count, 0);
        if (offset >= qs_cfg->pf_map_size) {
                mutex_unlock(qs_cfg->qs_mutex);
                return -ENOMEM;
        }

        bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
        for (i = 0; i < qs_cfg->q_count; i++)
                qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
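
        /* Editorial illustration (hypothetical values): if q_count is 4 and
         * the first free run of 4 bits in pf_map starts at PF queue 24, the
         * loop above leaves vsi_map[0..3] = {24, 25, 26, 27}.
         */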
        mutex_unlock(qs_cfg->qs_mutex);

        return 0;
}

/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for pf->vsi queues assignment
 *
 * Return 0 on success and -ENOMEM in case there is no space left in the PF
 * queue bitmap
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
        int i, index = 0;

        mutex_lock(qs_cfg->qs_mutex);
        for (i = 0; i < qs_cfg->q_count; i++) {
                index = find_next_zero_bit(qs_cfg->pf_map,
                                           qs_cfg->pf_map_size, index);
                if (index >= qs_cfg->pf_map_size)
                        goto err_scatter;
                set_bit(index, qs_cfg->pf_map);
                qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
        }
        mutex_unlock(qs_cfg->qs_mutex);

        return 0;
err_scatter:
        for (index = 0; index < i; index++) {
                clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
                qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
        }
        mutex_unlock(qs_cfg->qs_mutex);

        return -ENOMEM;
}

/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for pf->vsi queues assignment
 *
 * This function first tries to find contiguous space. If it is not successful,
 * it tries with the scatter approach.
 *
 * Return 0 on success and -ENOMEM in case there is no space left in the PF
 * queue bitmap
 */
static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
        int ret = 0;

        ret = __ice_vsi_get_qs_contig(qs_cfg);
        if (ret) {
                /* contig failed, so try with scatter approach */
                qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
                qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
                                        qs_cfg->scatter_count);
                ret = __ice_vsi_get_qs_sc(qs_cfg);
        }
        return ret;
}

/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
        struct ice_pf *pf = vsi->back;
        struct ice_qs_cfg tx_qs_cfg = {
                .qs_mutex = &pf->avail_q_mutex,
                .pf_map = pf->avail_txqs,
                .pf_map_size = pf->max_pf_txqs,
                .q_count = vsi->alloc_txq,
                .scatter_count = ICE_MAX_SCATTER_TXQS,
                .vsi_map = vsi->txq_map,
                .vsi_map_offset = 0,
                .mapping_mode = vsi->tx_mapping_mode
        };
        struct ice_qs_cfg rx_qs_cfg = {
                .qs_mutex = &pf->avail_q_mutex,
                .pf_map = pf->avail_rxqs,
                .pf_map_size = pf->max_pf_rxqs,
                .q_count = vsi->alloc_rxq,
                .scatter_count = ICE_MAX_SCATTER_RXQS,
                .vsi_map = vsi->rxq_map,
                .vsi_map_offset = 0,
                .mapping_mode = vsi->rx_mapping_mode
        };
        int ret = 0;

        vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
        vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;

        ret = __ice_vsi_get_qs(&tx_qs_cfg);
        if (!ret)
                ret = __ice_vsi_get_qs(&rx_qs_cfg);

        return ret;
}

/**
 * ice_vsi_put_qs - Release queues from VSI to PF
 * @vsi: the VSI that is going to release queues
 */
void ice_vsi_put_qs(struct ice_vsi *vsi)
{
        struct ice_pf *pf = vsi->back;
        int i;

        mutex_lock(&pf->avail_q_mutex);

        for (i = 0; i < vsi->alloc_txq; i++) {
                clear_bit(vsi->txq_map[i], pf->avail_txqs);
                vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
        }

        for (i = 0; i < vsi->alloc_rxq; i++) {
                clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
                vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
        }

        mutex_unlock(&pf->avail_q_mutex);
}

/**
 * ice_is_safe_mode
 * @pf: pointer to the PF struct
 *
 * returns true if driver is in safe mode, false otherwise
 */
bool ice_is_safe_mode(struct ice_pf *pf)
{
        return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}

/**
 * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
        struct ice_pf *pf;

        pf = vsi->back;

        if (vsi->rss_hkey_user)
                devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
        if (vsi->rss_lut_user)
                devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
        struct ice_hw_common_caps *cap;
        struct ice_pf *pf = vsi->back;

        if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
                vsi->rss_size = 1;
                return;
        }

        cap = &pf->hw.func_caps.common_cap;
        switch (vsi->type) {
        case ICE_VSI_PF:
                /* PF VSI will inherit RSS instance of PF */
                vsi->rss_table_size = cap->rss_table_size;
                vsi->rss_size = min_t(int, num_online_cpus(),
                                      BIT(cap->rss_table_entry_width));
                vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
                break;
        case ICE_VSI_VF:
                /* VF VSI will get a small RSS table.
                 * For VSI_LUT, LUT size should be set to 64 bytes
                 */
                vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
                vsi->rss_size = min_t(int, num_online_cpus(),
                                      BIT(cap->rss_table_entry_width));
                vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
                break;
        case ICE_VSI_LB:
                break;
        default:
                dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
                         vsi->type);
                break;
        }
}

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
{
        u32 table = 0;

        memset(&ctxt->info, 0, sizeof(ctxt->info));
        /* VSI's should be allocated from shared pool */
        ctxt->alloc_from_pool = true;
        /* Src pruning enabled by default */
        ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
        /* Traffic from VSI can be sent to LAN */
        ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
        /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
         * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
         * packets untagged/tagged.
         */
        ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
                                  ICE_AQ_VSI_VLAN_MODE_M) >>
                                 ICE_AQ_VSI_VLAN_MODE_S);
        /* Have 1:1 UP mapping for both ingress/egress tables */
        table |= ICE_UP_TABLE_TRANSLATE(0, 0);
        table |= ICE_UP_TABLE_TRANSLATE(1, 1);
        table |= ICE_UP_TABLE_TRANSLATE(2, 2);
        table |= ICE_UP_TABLE_TRANSLATE(3, 3);
        table |= ICE_UP_TABLE_TRANSLATE(4, 4);
        table |= ICE_UP_TABLE_TRANSLATE(5, 5);
        table |= ICE_UP_TABLE_TRANSLATE(6, 6);
        table |= ICE_UP_TABLE_TRANSLATE(7, 7);
        ctxt->info.ingress_table = cpu_to_le32(table);
        ctxt->info.egress_table = cpu_to_le32(table);
        /* Have 1:1 UP mapping for outer to inner UP table */
        ctxt->info.outer_up_table = cpu_to_le32(table);
        /* No Outer tag support, so outer_tag_flags remains zero */
}

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
        u16 offset = 0, qmap = 0, tx_count = 0;
        u16 qcount_tx = vsi->alloc_txq;
        u16 qcount_rx = vsi->alloc_rxq;
        u16 tx_numq_tc, rx_numq_tc;
        u16 pow = 0, max_rss = 0;
        bool ena_tc0 = false;
        u8 netdev_tc = 0;
        int i;

        /* at least TC0 should be enabled by default */
        if (vsi->tc_cfg.numtc) {
                if (!(vsi->tc_cfg.ena_tc & BIT(0)))
                        ena_tc0 = true;
        } else {
                ena_tc0 = true;
        }

        if (ena_tc0) {
                vsi->tc_cfg.numtc++;
                vsi->tc_cfg.ena_tc |= 1;
        }

        rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
        if (!rx_numq_tc)
                rx_numq_tc = 1;
        tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
        if (!tx_numq_tc)
                tx_numq_tc = 1;

        /* TC mapping is a function of the number of Rx queues assigned to the
         * VSI for each traffic class and the offset of these queues.
         * The first 10 bits are for the queue offset for TC0, the next 4 bits
         * for the number of queues allocated to TC0. The number of queues is
         * a power-of-2.
         *
         * If a TC is not enabled, the queue offset is set to 0 and one queue
         * is allocated; this way, traffic for the given TC will be sent to
         * the default queue.
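         *
         * Worked example (editorial note, hypothetical values): with only TC0
         * enabled and 16 Rx queues assigned to the VSI, offset is 0 and
         * pow = order_base_2(16) = 4, so the TC0 entry below encodes
         * qmap = (0 << ICE_AQ_VSI_TC_Q_OFFSET_S) | (4 << ICE_AQ_VSI_TC_Q_NUM_S).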
         *
         * Setup number and offset of Rx queues for all TCs for the VSI
         */

        qcount_rx = rx_numq_tc;

        /* qcount will change if RSS is enabled */
        if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
                if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
                        if (vsi->type == ICE_VSI_PF)
                                max_rss = ICE_MAX_LG_RSS_QS;
                        else
                                max_rss = ICE_MAX_SMALL_RSS_QS;
                        qcount_rx = min_t(int, rx_numq_tc, max_rss);
                        qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
                }
        }

        /* find the (rounded up) power-of-2 of qcount */
        pow = order_base_2(qcount_rx);

        ice_for_each_traffic_class(i) {
                if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
                        /* TC is not enabled */
                        vsi->tc_cfg.tc_info[i].qoffset = 0;
                        vsi->tc_cfg.tc_info[i].qcount_rx = 1;
                        vsi->tc_cfg.tc_info[i].qcount_tx = 1;
                        vsi->tc_cfg.tc_info[i].netdev_tc = 0;
                        ctxt->info.tc_mapping[i] = 0;
                        continue;
                }

                /* TC is enabled */
                vsi->tc_cfg.tc_info[i].qoffset = offset;
                vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
                vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
                vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

                qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
                        ICE_AQ_VSI_TC_Q_OFFSET_M) |
                        ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
                         ICE_AQ_VSI_TC_Q_NUM_M);
                offset += qcount_rx;
                tx_count += tx_numq_tc;
                ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
        }

        /* If offset is non-zero, it was calculated correctly based on the
         * enabled TCs for the given VSI; otherwise qcount_rx will always
         * be correct and non-zero because it is based off the VSI's
         * allocated Rx queues, which is at least 1 (hence qcount_tx will be
         * at least 1)
         */
        if (offset)
                vsi->num_rxq = offset;
        else
                vsi->num_rxq = qcount_rx;

        vsi->num_txq = tx_count;

        if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
                dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
                /* since there is a chance that num_rxq could have been changed
                 * in the above for loop, make num_txq equal to num_rxq.
                 */
                vsi->num_txq = vsi->num_rxq;
        }

        /* Rx queue mapping */
        ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
        /* q_mapping buffer holds the info for the first queue allocated for
         * this VSI in the PF space and also the number of queues associated
         * with this VSI.
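         *
         * Editorial illustration (hypothetical values): if the VSI was given
         * PF Rx queues 12..19, then rxq_map[0] is 12 and num_rxq is 8, so
         * q_mapping[0] = 12 and q_mapping[1] = 8.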
         */
        ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
        ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
        u8 lut_type, hash_type;
        struct ice_pf *pf;

        pf = vsi->back;

        switch (vsi->type) {
        case ICE_VSI_PF:
                /* PF VSI will inherit RSS instance of PF */
                lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
                hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
                break;
        case ICE_VSI_VF:
                /* VF VSI will get a small RSS table which is a VSI LUT type */
                lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
                hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
                break;
        case ICE_VSI_LB:
                dev_dbg(&pf->pdev->dev, "Unsupported VSI type %d\n", vsi->type);
                return;
        default:
                dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
                return;
        }

        ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
                                ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
                                ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
                                 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi)
{
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        struct ice_vsi_ctx *ctxt;
        int ret = 0;

        ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
        if (!ctxt)
                return -ENOMEM;

        ctxt->info = vsi->info;
        switch (vsi->type) {
        case ICE_VSI_LB:
                /* fall through */
        case ICE_VSI_PF:
                ctxt->flags = ICE_AQ_VSI_TYPE_PF;
                break;
        case ICE_VSI_VF:
                ctxt->flags = ICE_AQ_VSI_TYPE_VF;
                /* VF number here is the absolute VF number (0-255) */
                ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
                break;
        default:
                return -ENODEV;
        }

        ice_set_dflt_vsi_ctx(ctxt);
        /* if the switch is in VEB mode, allow VSI loopback */
        if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
                ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

        /* Set LUT type and HASH type if RSS is enabled */
        if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
                ice_set_rss_vsi_ctx(ctxt, vsi);

        ctxt->info.sw_id = vsi->port_info->sw_id;
        ice_vsi_setup_q_map(vsi, ctxt);

        /* Enable MAC Antispoof with new VSI being initialized or updated */
        if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
                ctxt->info.valid_sections |=
                        cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
                ctxt->info.sec_flags |=
                        ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
        }

        /* Allow control frames out of main VSI */
        if (vsi->type == ICE_VSI_PF) {
                ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
                ctxt->info.valid_sections |=
                        cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
        }

        ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
        if (ret) {
                dev_err(&pf->pdev->dev,
                        "Add VSI failed, err %d\n", ret);
                return -EIO;
        }

        /* keep context for update VSI operations */
        vsi->info = ctxt->info;

        /* record VSI number returned */
        vsi->vsi_num = ctxt->vsi_num;

        devm_kfree(&pf->pdev->dev, ctxt);
        return ret;
}

/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
        struct ice_q_vector *q_vector;
        struct ice_pf *pf = vsi->back;
        struct ice_ring *ring;

        if (!vsi->q_vectors[v_idx]) {
                dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
                        v_idx);
                return;
        }
        q_vector = vsi->q_vectors[v_idx];

        ice_for_each_ring(ring, q_vector->tx)
                ring->q_vector = NULL;
        ice_for_each_ring(ring, q_vector->rx)
                ring->q_vector = NULL;

        /* only VSI with an associated netdev is set up with NAPI */
        if (vsi->netdev)
                netif_napi_del(&q_vector->napi);

        devm_kfree(&pf->pdev->dev, q_vector);
        vsi->q_vectors[v_idx] = NULL;
}

/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
        int v_idx;

        ice_for_each_q_vector(vsi, v_idx)
                ice_free_q_vector(vsi, v_idx);
}

/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
{
        struct ice_pf *pf = vsi->back;
        struct ice_q_vector *q_vector;

        /* allocate q_vector */
        q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
        if (!q_vector)
                return -ENOMEM;

        q_vector->vsi = vsi;
        q_vector->v_idx = v_idx;
        if (vsi->type == ICE_VSI_VF)
                goto out;
        /* only set affinity_mask if the CPU is online */
        if (cpu_online(v_idx))
                cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

        /* This will not be called in the driver load path because the netdev
         * will not be created yet. All other cases will register the NAPI
         * handler here (i.e. resume, reset/rebuild, etc.)
         */
        if (vsi->netdev)
                netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
                               NAPI_POLL_WEIGHT);

out:
        /* tie q_vector and VSI together */
        vsi->q_vectors[v_idx] = q_vector;

        return 0;
}

/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
        struct ice_pf *pf = vsi->back;
        int v_idx = 0, num_q_vectors;
        int err;

        if (vsi->q_vectors[0]) {
                dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
                        vsi->vsi_num);
                return -EEXIST;
        }

        num_q_vectors = vsi->num_q_vectors;

        for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
                err = ice_vsi_alloc_q_vector(vsi, v_idx);
                if (err)
                        goto err_out;
        }

        return 0;

err_out:
        while (v_idx--)
                ice_free_q_vector(vsi, v_idx);

        dev_err(&pf->pdev->dev,
                "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
                vsi->num_q_vectors, vsi->vsi_num, err);
        vsi->num_q_vectors = 0;
        return err;
}

/**
 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after ice_vsi_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 */
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
{
        struct ice_pf *pf = vsi->back;
        u16 num_q_vectors;

        /* SRIOV doesn't grab irq_tracker entries for each VSI */
        if (vsi->type == ICE_VSI_VF)
                return 0;

        if (vsi->base_vector) {
                dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
                        vsi->vsi_num, vsi->base_vector);
                return -EEXIST;
        }

        num_q_vectors = vsi->num_q_vectors;
        /* reserve slots from OS requested IRQs */
        vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
                                       vsi->idx);
        if (vsi->base_vector < 0) {
                dev_err(&pf->pdev->dev,
                        "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
                        num_q_vectors, vsi->vsi_num, vsi->base_vector);
                return -ENOENT;
        }
        pf->num_avail_sw_msix -= num_q_vectors;

        return 0;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
        int i;

        if (vsi->tx_rings) {
                for (i = 0; i < vsi->alloc_txq; i++) {
                        if (vsi->tx_rings[i]) {
                                kfree_rcu(vsi->tx_rings[i], rcu);
                                vsi->tx_rings[i] = NULL;
                        }
                }
        }
        if (vsi->rx_rings) {
                for (i = 0; i < vsi->alloc_rxq; i++) {
                        if (vsi->rx_rings[i]) {
                                kfree_rcu(vsi->rx_rings[i], rcu);
                                vsi->rx_rings[i] = NULL;
                        }
                }
        }
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
        struct ice_pf *pf = vsi->back;
        int i;

        /* Allocate Tx rings */
        for (i = 0; i < vsi->alloc_txq; i++) {
                struct ice_ring *ring;

                /* allocate with kzalloc(), free with kfree_rcu() */
                ring = kzalloc(sizeof(*ring), GFP_KERNEL);

                if (!ring)
                        goto err_out;

                ring->q_index = i;
                ring->reg_idx = vsi->txq_map[i];
                ring->ring_active = false;
                ring->vsi = vsi;
                ring->dev = &pf->pdev->dev;
                ring->count = vsi->num_tx_desc;
                vsi->tx_rings[i] = ring;
        }

        /* Allocate Rx rings */
        for (i = 0; i < vsi->alloc_rxq; i++) {
                struct ice_ring *ring;

                /* allocate with kzalloc(), free with kfree_rcu() */
                ring = kzalloc(sizeof(*ring), GFP_KERNEL);
                if (!ring)
                        goto err_out;

                ring->q_index = i;
                ring->reg_idx = vsi->rxq_map[i];
                ring->ring_active = false;
                ring->vsi = vsi;
                ring->netdev = vsi->netdev;
                ring->dev = &pf->pdev->dev;
                ring->count = vsi->num_rx_desc;
                vsi->rx_rings[i] = ring;
        }

        return 0;

err_out:
        ice_vsi_clear_rings(vsi);
        return -ENOMEM;
}

/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
#ifdef CONFIG_DCB
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
#else
static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
#endif /* CONFIG_DCB */
{
        int q_vectors = vsi->num_q_vectors;
        int tx_rings_rem, rx_rings_rem;
        int v_id;

        /* initially assigning remaining rings count to VSIs num queue value */
        tx_rings_rem = vsi->num_txq;
        rx_rings_rem = vsi->num_rxq;

        for (v_id = 0; v_id < q_vectors; v_id++) {
                struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
                int tx_rings_per_v, rx_rings_per_v, q_id, q_base;

                /* Tx rings mapping to vector */
                tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
                q_vector->num_ring_tx = tx_rings_per_v;
                q_vector->tx.ring = NULL;
                q_vector->tx.itr_idx = ICE_TX_ITR;
                q_base = vsi->num_txq - tx_rings_rem;

                for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
                        struct ice_ring *tx_ring = vsi->tx_rings[q_id];

                        tx_ring->q_vector = q_vector;
                        tx_ring->next = q_vector->tx.ring;
                        q_vector->tx.ring = tx_ring;
                }
                tx_rings_rem -= tx_rings_per_v;

                /* Rx rings mapping to vector */
                rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
                q_vector->num_ring_rx = rx_rings_per_v;
                q_vector->rx.ring = NULL;
                q_vector->rx.itr_idx = ICE_RX_ITR;
                q_base = vsi->num_rxq - rx_rings_rem;

                for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
                        struct ice_ring *rx_ring = vsi->rx_rings[q_id];

                        rx_ring->q_vector = q_vector;
                        rx_ring->next = q_vector->rx.ring;
                        q_vector->rx.ring = rx_ring;
                }
                rx_rings_rem -= rx_rings_per_v;
        }
}

/**
 * ice_vsi_manage_rss_lut - disable/enable RSS
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 *
 * In the event of disable request for RSS, this function will zero out RSS
 * LUT, while in the event of enable request for RSS, it will reconfigure RSS
 * LUT.
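 *
 * Editorial note: when the user has not supplied a LUT (rss_lut_user is
 * NULL), the enable path is assumed to fill the table with a repeating
 * 0 .. rss_size - 1 pattern via ice_fill_rss_lut().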
 */
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
        int err = 0;
        u8 *lut;

        lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size,
                           GFP_KERNEL);
        if (!lut)
                return -ENOMEM;

        if (ena) {
                if (vsi->rss_lut_user)
                        memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
                else
                        ice_fill_rss_lut(lut, vsi->rss_table_size,
                                         vsi->rss_size);
        }

        err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
        devm_kfree(&vsi->back->pdev->dev, lut);
        return err;
}

/**
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
 * @vsi: VSI to be configured
 */
static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
        struct ice_aqc_get_set_rss_keys *key;
        struct ice_pf *pf = vsi->back;
        enum ice_status status;
        int err = 0;
        u8 *lut;

        vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);

        lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
        if (!lut)
                return -ENOMEM;

        if (vsi->rss_lut_user)
                memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
        else
                ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);

        status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
                                    vsi->rss_table_size);

        if (status) {
                dev_err(&pf->pdev->dev,
                        "set_rss_lut failed, error %d\n", status);
                err = -EIO;
                goto ice_vsi_cfg_rss_exit;
        }

        key = devm_kzalloc(&pf->pdev->dev, sizeof(*key), GFP_KERNEL);
        if (!key) {
                err = -ENOMEM;
                goto ice_vsi_cfg_rss_exit;
        }

        if (vsi->rss_hkey_user)
                memcpy(key,
                       (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user,
                       ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
        else
                netdev_rss_key_fill((void *)key,
                                    ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);

        status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);

        if (status) {
                dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n",
                        status);
                err = -EIO;
        }

        devm_kfree(&pf->pdev->dev, key);
ice_vsi_cfg_rss_exit:
        devm_kfree(&pf->pdev->dev, lut);
        return err;
}

/**
 * ice_add_mac_to_list - Add a MAC address filter entry to the list
 * @vsi: the VSI to be forwarded to
 * @add_list: pointer to the list which contains MAC filter entries
 * @macaddr: the MAC address to be added.
 *
 * Adds MAC address filter entry to the temp list
 *
 * Returns 0 on success or -ENOMEM on failure.
 */
int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
                        const u8 *macaddr)
{
        struct ice_fltr_list_entry *tmp;
        struct ice_pf *pf = vsi->back;

        tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
        if (!tmp)
                return -ENOMEM;

        tmp->fltr_info.flag = ICE_FLTR_TX;
        tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
        tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
        tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        tmp->fltr_info.vsi_handle = vsi->idx;
        ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);

        INIT_LIST_HEAD(&tmp->list_entry);
        list_add(&tmp->list_entry, add_list);

        return 0;
}

/**
 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 */
void ice_update_eth_stats(struct ice_vsi *vsi)
{
        struct ice_eth_stats *prev_es, *cur_es;
        struct ice_hw *hw = &vsi->back->hw;
        u16 vsi_num = vsi->vsi_num;     /* HW absolute index of a VSI */

        prev_es = &vsi->eth_stats_prev;
        cur_es = &vsi->eth_stats;

        ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
                          &prev_es->rx_bytes, &cur_es->rx_bytes);

        ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
                          &prev_es->rx_unicast, &cur_es->rx_unicast);

        ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
                          &prev_es->rx_multicast, &cur_es->rx_multicast);

        ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
                          &prev_es->rx_broadcast, &cur_es->rx_broadcast);

        ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
                          &prev_es->rx_discards, &cur_es->rx_discards);

        ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
                          &prev_es->tx_bytes, &cur_es->tx_bytes);

        ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
                          &prev_es->tx_unicast, &cur_es->tx_unicast);

        ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
                          &prev_es->tx_multicast, &cur_es->tx_multicast);

        ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
                          &prev_es->tx_broadcast, &cur_es->tx_broadcast);

        ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
                          &prev_es->tx_errors, &cur_es->tx_errors);

        vsi->stat_offsets_loaded = true;
}

/**
 * ice_free_fltr_list - free filter lists helper
 * @dev: pointer to the device struct
 * @h: pointer to the list head to be freed
 *
 * Helper function to free filter lists previously created using
 * ice_add_mac_to_list
 */
void ice_free_fltr_list(struct device *dev, struct list_head *h)
{
        struct ice_fltr_list_entry *e, *tmp;

        list_for_each_entry_safe(e, tmp, h, list_entry) {
                list_del(&e->list_entry);
                devm_kfree(dev, e);
        }
}

/**
 * ice_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be added
 */
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
{
        struct ice_fltr_list_entry *tmp;
        struct ice_pf *pf = vsi->back;
        LIST_HEAD(tmp_add_list);
        enum ice_status status;
        int err = 0;

        tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
        tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        tmp->fltr_info.flag = ICE_FLTR_TX;
        tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
        tmp->fltr_info.vsi_handle = vsi->idx;
        tmp->fltr_info.l_data.vlan.vlan_id = vid;

        INIT_LIST_HEAD(&tmp->list_entry);
        list_add(&tmp->list_entry, &tmp_add_list);

        status = ice_add_vlan(&pf->hw, &tmp_add_list);
        if (status) {
                err = -ENODEV;
                dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
                        vid, vsi->vsi_num);
        }

        ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
        return err;
}

/**
 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be removed
 *
 * Returns 0 on success and negative on failure
 */
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
        struct ice_fltr_list_entry *list;
        struct ice_pf *pf = vsi->back;
        LIST_HEAD(tmp_add_list);
        enum ice_status status;
        int err = 0;

        list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
        if (!list)
                return -ENOMEM;

        list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
        list->fltr_info.vsi_handle = vsi->idx;
        list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
        list->fltr_info.l_data.vlan.vlan_id = vid;
        list->fltr_info.flag = ICE_FLTR_TX;
        list->fltr_info.src_id = ICE_SRC_ID_VSI;

        INIT_LIST_HEAD(&list->list_entry);
        list_add(&list->list_entry, &tmp_add_list);

        status = ice_remove_vlan(&pf->hw, &tmp_add_list);
        if (status == ICE_ERR_DOES_NOT_EXIST) {
                dev_dbg(&pf->pdev->dev,
                        "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
                        vid, vsi->vsi_num, status);
        } else if (status) {
                dev_err(&pf->pdev->dev,
                        "Error removing VLAN %d on vsi %i error: %d\n",
                        vid, vsi->vsi_num, status);
                err = -EIO;
        }

        ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
        return err;
}

/**
 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Rx VSI for operation.
 */
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
        u16 i;

        if (vsi->type == ICE_VSI_VF)
                goto setup_rings;

        if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
                vsi->max_frame = vsi->netdev->mtu +
                        ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        else
                vsi->max_frame = ICE_RXBUF_2048;

        vsi->rx_buf_len = ICE_RXBUF_2048;
setup_rings:
        /* set up individual rings */
        for (i = 0; i < vsi->num_rxq; i++) {
                int err;

                err = ice_setup_rx_ctx(vsi->rx_rings[i]);
                if (err) {
                        dev_err(&vsi->back->pdev->dev,
                                "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
                                i, err);
                        return err;
                }
        }

        return 0;
}

/**
 * ice_vsi_cfg_txq - Configure single Tx queue
 * @vsi: the VSI that queue belongs to
 * @ring: Tx ring to be configured
 * @tc_q_idx: queue index within given TC
 * @qg_buf: queue group buffer
 * @tc: TC that Tx ring belongs to
 */
static int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
                struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc)
{
        struct ice_tlan_ctx tlan_ctx = { 0 };
        struct ice_aqc_add_txqs_perq *txq;
        struct ice_pf *pf = vsi->back;
        u8 buf_len = sizeof(*qg_buf);
        enum ice_status status;
        u16 pf_q;

        pf_q = ring->reg_idx;
        ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
        /* copy context contents into the qg_buf */
        qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
        ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
                    ice_tlan_ctx_info);

        /* init queue specific tail reg. It is referred to as
         * transmit comm scheduler queue doorbell.
         */
        ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);

        /* Add unique software queue handle of the Tx queue per
         * TC into the VSI Tx ring
         */
        ring->q_handle = tc_q_idx;

        status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
                                 1, qg_buf, buf_len, NULL);
        if (status) {
                dev_err(&pf->pdev->dev,
                        "Failed to set LAN Tx queue context, error: %d\n",
                        status);
                return -ENODEV;
        }

        /* Add Tx Queue TEID into the VSI Tx ring from the
         * response. This will complete configuring and
         * enabling the queue.
         */
        txq = &qg_buf->txqs[0];
        if (pf_q == le16_to_cpu(txq->txq_id))
                ring->txq_teid = le32_to_cpu(txq->q_teid);

        return 0;
}

/**
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 * @rings: Tx ring array to be configured
 * @offset: offset within vsi->txq_map
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
{
        struct ice_aqc_add_tx_qgrp *qg_buf;
        struct ice_pf *pf = vsi->back;
        u16 q_idx = 0, i;
        int err = 0;
        u8 tc;

        qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL);
        if (!qg_buf)
                return -ENOMEM;

        qg_buf->num_txqs = 1;

        /* set up and configure the Tx queues for each enabled TC */
        ice_for_each_traffic_class(tc) {
                if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
                        break;

                for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
                        err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset,
                                              qg_buf, tc);
                        if (err)
                                goto err_cfg_txqs;

                        q_idx++;
                }
        }
err_cfg_txqs:
        devm_kfree(&pf->pdev->dev, qg_buf);
        return err;
}

/**
 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
        return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
}

/**
 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
 * @intrl: interrupt rate limit in usecs
 * @gran: interrupt rate limit granularity in usecs
 *
 * This function converts a decimal interrupt rate limit in usecs to the format
 * expected by firmware.
 */
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
        u32 val = intrl / gran;

        if (val)
                return val | GLINT_RATE_INTRL_ENA_M;
        return 0;
}

/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * @hw: board specific structure
 */
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
        u32 regval = rd32(hw, GLINT_CTL);

        /* no need to update global register if ITR gran is already set */
        if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
            (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
              GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
            (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
              GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
            (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
              GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
            (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
              GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
                return;

        regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
                  GLINT_CTL_ITR_GRAN_200_M) |
                 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
                  GLINT_CTL_ITR_GRAN_100_M) |
                 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
                  GLINT_CTL_ITR_GRAN_50_M) |
                 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
                  GLINT_CTL_ITR_GRAN_25_M);
        wr32(hw, GLINT_CTL, regval);
}

/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in.
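 *
 * Editorial illustration (hypothetical values, assuming ICE_ITR_GRAN_S is 1,
 * i.e. a 2 usec granularity): an itr_setting of 50 usecs would be written to
 * GLINT_ITR as ITR_REG_ALIGN(50) >> 1 = 25.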
 */
static void
ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
        ice_cfg_itr_gran(hw);

        if (q_vector->num_ring_rx) {
                struct ice_ring_container *rc = &q_vector->rx;

                /* if this value is set then don't overwrite with default */
                if (!rc->itr_setting)
                        rc->itr_setting = ICE_DFLT_RX_ITR;

                rc->target_itr = ITR_TO_REG(rc->itr_setting);
                rc->next_update = jiffies + 1;
                rc->current_itr = rc->target_itr;
                wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
                     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
        }

        if (q_vector->num_ring_tx) {
                struct ice_ring_container *rc = &q_vector->tx;

                /* if this value is set then don't overwrite with default */
                if (!rc->itr_setting)
                        rc->itr_setting = ICE_DFLT_TX_ITR;

                rc->target_itr = ITR_TO_REG(rc->itr_setting);
                rc->next_update = jiffies + 1;
                rc->current_itr = rc->target_itr;
                wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
                     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
        }
}

/**
 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
 * @vsi: the VSI being configured
 * @txq: Tx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
 * within the function space.
 */
#ifdef CONFIG_PCI_IOV
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
#else
static void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
#endif /* CONFIG_PCI_IOV */
{
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        u32 val;

        itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;

        val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
              ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);

        wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
}

/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @vsi: the VSI being configured
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 * within the function space.
 */
#ifdef CONFIG_PCI_IOV
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
#else
static void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
#endif /* CONFIG_PCI_IOV */
{
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        u32 val;

        itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;

        val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
              ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);

        wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

        ice_flush(hw);
}

/**
 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 *
 * This configures MSIX mode interrupts for the PF VSI, and should not be used
 * for the VF VSI.
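 *
 * Editorial illustration (hypothetical values): a PF VSI with 8 Tx and 8 Rx
 * queues and 8 q_vectors ends up with one Tx and one Rx queue per vector;
 * for each queue the corresponding QINT_TQCTL/QINT_RQCTL register is
 * programmed with that vector's reg_idx and ITR index.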
1983 */ 1984 void ice_vsi_cfg_msix(struct ice_vsi *vsi) 1985 { 1986 struct ice_pf *pf = vsi->back; 1987 struct ice_hw *hw = &pf->hw; 1988 u32 txq = 0, rxq = 0; 1989 int i, q; 1990 1991 for (i = 0; i < vsi->num_q_vectors; i++) { 1992 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 1993 u16 reg_idx = q_vector->reg_idx; 1994 1995 ice_cfg_itr(hw, q_vector); 1996 1997 wr32(hw, GLINT_RATE(reg_idx), 1998 ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); 1999 2000 /* Both the Transmit Queue Interrupt Cause Control register 2001 * and the Receive Queue Interrupt Cause Control register 2002 * expect the MSIX_INDX field to be the vector index 2003 * within the function space and not the absolute 2004 * vector index across PF or across device. 2005 * For SR-IOV VF VSIs, the queue vector index always starts 2006 * at 1 since the first vector index (0) is used for OICR 2007 * in VF space. Since VMDq and other PF VSIs are within 2008 * the PF function space, use the vector index that is 2009 * tracked for this PF. 2010 */ 2011 for (q = 0; q < q_vector->num_ring_tx; q++) { 2012 ice_cfg_txq_interrupt(vsi, txq, reg_idx, 2013 q_vector->tx.itr_idx); 2014 txq++; 2015 } 2016 2017 for (q = 0; q < q_vector->num_ring_rx; q++) { 2018 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx, 2019 q_vector->rx.itr_idx); 2020 rxq++; 2021 } 2022 } 2023 } 2024 2025 /** 2026 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx 2027 * @vsi: the VSI being changed 2028 */ 2029 int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) 2030 { 2031 struct device *dev = &vsi->back->pdev->dev; 2032 struct ice_hw *hw = &vsi->back->hw; 2033 struct ice_vsi_ctx *ctxt; 2034 enum ice_status status; 2035 int ret = 0; 2036 2037 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); 2038 if (!ctxt) 2039 return -ENOMEM; 2040 2041 /* Here we are configuring the VSI to let the driver add VLAN tags by 2042 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag 2043 * insertion happens in the Tx hot path, in ice_tx_map. 2044 */ 2045 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; 2046 2047 /* Preserve existing VLAN strip setting */ 2048 ctxt->info.vlan_flags |= (vsi->info.vlan_flags & 2049 ICE_AQ_VSI_VLAN_EMOD_M); 2050 2051 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 2052 2053 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 2054 if (status) { 2055 dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", 2056 status, hw->adminq.sq_last_status); 2057 ret = -EIO; 2058 goto out; 2059 } 2060 2061 vsi->info.vlan_flags = ctxt->info.vlan_flags; 2062 out: 2063 devm_kfree(dev, ctxt); 2064 return ret; 2065 } 2066 2067 /** 2068 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx 2069 * @vsi: the VSI being changed 2070 * @ena: boolean value indicating if this is an enable or disable request 2071 */ 2072 int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) 2073 { 2074 struct device *dev = &vsi->back->pdev->dev; 2075 struct ice_hw *hw = &vsi->back->hw; 2076 struct ice_vsi_ctx *ctxt; 2077 enum ice_status status; 2078 int ret = 0; 2079 2080 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); 2081 if (!ctxt) 2082 return -ENOMEM; 2083 2084 /* Here we are configuring what the VSI should do with the VLAN tag in 2085 * the Rx packet. We can either leave the tag in the packet or put it in 2086 * the Rx descriptor. 
2087 */ 2088 if (ena) 2089 /* Strip VLAN tag from Rx packet and put it in the desc */ 2090 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; 2091 else 2092 /* Disable stripping. Leave tag in packet */ 2093 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; 2094 2095 /* Allow all packets untagged/tagged */ 2096 ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; 2097 2098 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 2099 2100 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 2101 if (status) { 2102 dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n", 2103 ena, status, hw->adminq.sq_last_status); 2104 ret = -EIO; 2105 goto out; 2106 } 2107 2108 vsi->info.vlan_flags = ctxt->info.vlan_flags; 2109 out: 2110 devm_kfree(dev, ctxt); 2111 return ret; 2112 } 2113 2114 /** 2115 * ice_vsi_start_rx_rings - start VSI's Rx rings 2116 * @vsi: the VSI whose rings are to be started 2117 * 2118 * Returns 0 on success and a negative value on error 2119 */ 2120 int ice_vsi_start_rx_rings(struct ice_vsi *vsi) 2121 { 2122 return ice_vsi_ctrl_rx_rings(vsi, true); 2123 } 2124 2125 /** 2126 * ice_vsi_stop_rx_rings - stop VSI's Rx rings 2127 * @vsi: the VSI 2128 * 2129 * Returns 0 on success and a negative value on error 2130 */ 2131 int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) 2132 { 2133 return ice_vsi_ctrl_rx_rings(vsi, false); 2134 } 2135 2136 /** 2137 * ice_trigger_sw_intr - trigger a software interrupt 2138 * @hw: pointer to the HW structure 2139 * @q_vector: interrupt vector to trigger the software interrupt for 2140 */ 2141 void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector) 2142 { 2143 wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 2144 (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) | 2145 GLINT_DYN_CTL_SWINT_TRIG_M | 2146 GLINT_DYN_CTL_INTENA_M); 2147 } 2148 2149 /** 2150 * ice_vsi_stop_tx_ring - Disable single Tx ring 2151 * @vsi: the VSI being configured 2152 * @rst_src: reset source 2153 * @rel_vmvf_num: Relative ID of VF/VM 2154 * @ring: Tx ring to be stopped 2155 * @txq_meta: Meta data of Tx ring to be stopped 2156 */ 2157 #ifndef CONFIG_PCI_IOV 2158 static 2159 #endif /* !CONFIG_PCI_IOV */ 2160 int 2161 ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2162 u16 rel_vmvf_num, struct ice_ring *ring, 2163 struct ice_txq_meta *txq_meta) 2164 { 2165 struct ice_pf *pf = vsi->back; 2166 struct ice_q_vector *q_vector; 2167 struct ice_hw *hw = &pf->hw; 2168 enum ice_status status; 2169 u32 val; 2170 2171 /* clear cause_ena bit for disabled queues */ 2172 val = rd32(hw, QINT_TQCTL(ring->reg_idx)); 2173 val &= ~QINT_TQCTL_CAUSE_ENA_M; 2174 wr32(hw, QINT_TQCTL(ring->reg_idx), val); 2175 2176 /* software is expected to wait for 100 ns */ 2177 ndelay(100); 2178 2179 /* trigger a software interrupt for the vector 2180 * associated to the queue to schedule NAPI handler 2181 */ 2182 q_vector = ring->q_vector; 2183 if (q_vector) 2184 ice_trigger_sw_intr(hw, q_vector); 2185 2186 status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx, 2187 txq_meta->tc, 1, &txq_meta->q_handle, 2188 &txq_meta->q_id, &txq_meta->q_teid, rst_src, 2189 rel_vmvf_num, NULL); 2190 2191 /* if the disable queue command was exercised during an 2192 * active reset flow, ICE_ERR_RESET_ONGOING is returned. 2193 * This is not an error as the reset operation disables 2194 * queues at the hardware level anyway. 2195 */ 2196 if (status == ICE_ERR_RESET_ONGOING) { 2197 dev_dbg(&vsi->back->pdev->dev, 2198 "Reset in progress. 
LAN Tx queues already disabled\n"); 2199 } else if (status == ICE_ERR_DOES_NOT_EXIST) { 2200 dev_dbg(&vsi->back->pdev->dev, 2201 "LAN Tx queues do not exist, nothing to disable\n"); 2202 } else if (status) { 2203 dev_err(&vsi->back->pdev->dev, 2204 "Failed to disable LAN Tx queues, error: %d\n", status); 2205 return -ENODEV; 2206 } 2207 2208 return 0; 2209 } 2210 2211 /** 2212 * ice_fill_txq_meta - Prepare the Tx queue's meta data 2213 * @vsi: VSI that ring belongs to 2214 * @ring: ring that txq_meta will be based on 2215 * @txq_meta: a helper struct that wraps Tx queue's information 2216 * 2217 * Set up a helper struct that will contain all the necessary fields that 2218 * are needed for stopping Tx queue 2219 */ 2220 #ifndef CONFIG_PCI_IOV 2221 static 2222 #endif /* !CONFIG_PCI_IOV */ 2223 void 2224 ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, 2225 struct ice_txq_meta *txq_meta) 2226 { 2227 u8 tc = 0; 2228 2229 #ifdef CONFIG_DCB 2230 tc = ring->dcb_tc; 2231 #endif /* CONFIG_DCB */ 2232 txq_meta->q_id = ring->reg_idx; 2233 txq_meta->q_teid = ring->txq_teid; 2234 txq_meta->q_handle = ring->q_handle; 2235 txq_meta->vsi_idx = vsi->idx; 2236 txq_meta->tc = tc; 2237 } 2238 2239 /** 2240 * ice_vsi_stop_tx_rings - Disable Tx rings 2241 * @vsi: the VSI being configured 2242 * @rst_src: reset source 2243 * @rel_vmvf_num: Relative ID of VF/VM 2244 * @rings: Tx ring array to be stopped 2245 */ 2246 static int 2247 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2248 u16 rel_vmvf_num, struct ice_ring **rings) 2249 { 2250 u16 i, q_idx = 0; 2251 int status; 2252 u8 tc; 2253 2254 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) 2255 return -EINVAL; 2256 2257 /* set up the Tx queue list to be disabled for each enabled TC */ 2258 ice_for_each_traffic_class(tc) { 2259 if (!(vsi->tc_cfg.ena_tc & BIT(tc))) 2260 break; 2261 2262 for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { 2263 struct ice_txq_meta txq_meta = { }; 2264 2265 if (!rings || !rings[q_idx]) 2266 return -EINVAL; 2267 2268 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); 2269 status = ice_vsi_stop_tx_ring(vsi, rst_src, 2270 rel_vmvf_num, 2271 rings[q_idx], &txq_meta); 2272 2273 if (status) 2274 return status; 2275 2276 q_idx++; 2277 } 2278 } 2279 2280 return 0; 2281 } 2282 2283 /** 2284 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings 2285 * @vsi: the VSI being configured 2286 * @rst_src: reset source 2287 * @rel_vmvf_num: Relative ID of VF/VM 2288 */ 2289 int 2290 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2291 u16 rel_vmvf_num) 2292 { 2293 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings); 2294 } 2295 2296 /** 2297 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI 2298 * @vsi: VSI to enable or disable VLAN pruning on 2299 * @ena: set to true to enable VLAN pruning and false to disable it 2300 * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode 2301 * 2302 * returns 0 if VSI is updated, negative otherwise 2303 */ 2304 int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) 2305 { 2306 struct ice_vsi_ctx *ctxt; 2307 struct device *dev; 2308 struct ice_pf *pf; 2309 int status; 2310 2311 if (!vsi) 2312 return -EINVAL; 2313 2314 pf = vsi->back; 2315 dev = &pf->pdev->dev; 2316 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); 2317 if (!ctxt) 2318 return -ENOMEM; 2319 2320 ctxt->info = vsi->info; 2321 2322 if (ena) { 2323 ctxt->info.sec_flags |= 2324 ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 
2325 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S; 2326 ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 2327 } else { 2328 ctxt->info.sec_flags &= 2329 ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 2330 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 2331 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 2332 } 2333 2334 if (!vlan_promisc) 2335 ctxt->info.valid_sections = 2336 cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID | 2337 ICE_AQ_VSI_PROP_SW_VALID); 2338 2339 status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL); 2340 if (status) { 2341 netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n", 2342 ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status, 2343 pf->hw.adminq.sq_last_status); 2344 goto err_out; 2345 } 2346 2347 vsi->info.sec_flags = ctxt->info.sec_flags; 2348 vsi->info.sw_flags2 = ctxt->info.sw_flags2; 2349 2350 devm_kfree(dev, ctxt); 2351 return 0; 2352 2353 err_out: 2354 devm_kfree(dev, ctxt); 2355 return -EIO; 2356 } 2357 2358 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) 2359 { 2360 struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg; 2361 2362 vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg); 2363 vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg); 2364 } 2365 2366 /** 2367 * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors 2368 * @vsi: VSI to set the q_vectors register index on 2369 */ 2370 static int 2371 ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi) 2372 { 2373 u16 i; 2374 2375 if (!vsi || !vsi->q_vectors) 2376 return -EINVAL; 2377 2378 ice_for_each_q_vector(vsi, i) { 2379 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2380 2381 if (!q_vector) { 2382 dev_err(&vsi->back->pdev->dev, 2383 "Failed to set reg_idx on q_vector %d VSI %d\n", 2384 i, vsi->vsi_num); 2385 goto clear_reg_idx; 2386 } 2387 2388 if (vsi->type == ICE_VSI_VF) { 2389 struct ice_vf *vf = &vsi->back->vf[vsi->vf_id]; 2390 2391 q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector); 2392 } else { 2393 q_vector->reg_idx = 2394 q_vector->v_idx + vsi->base_vector; 2395 } 2396 } 2397 2398 return 0; 2399 2400 clear_reg_idx: 2401 ice_for_each_q_vector(vsi, i) { 2402 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2403 2404 if (q_vector) 2405 q_vector->reg_idx = 0; 2406 } 2407 2408 return -EINVAL; 2409 } 2410 2411 /** 2412 * ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule 2413 * @vsi: the VSI being configured 2414 * @add_rule: boolean value to add or remove ethertype filter rule 2415 */ 2416 static void 2417 ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule) 2418 { 2419 struct ice_fltr_list_entry *list; 2420 struct ice_pf *pf = vsi->back; 2421 LIST_HEAD(tmp_add_list); 2422 enum ice_status status; 2423 2424 list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); 2425 if (!list) 2426 return; 2427 2428 list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; 2429 list->fltr_info.fltr_act = ICE_DROP_PACKET; 2430 list->fltr_info.flag = ICE_FLTR_TX; 2431 list->fltr_info.src_id = ICE_SRC_ID_VSI; 2432 list->fltr_info.vsi_handle = vsi->idx; 2433 list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype; 2434 2435 INIT_LIST_HEAD(&list->list_entry); 2436 list_add(&list->list_entry, &tmp_add_list); 2437 2438 if (add_rule) 2439 status = ice_add_eth_mac(&pf->hw, &tmp_add_list); 2440 else 2441 status = ice_remove_eth_mac(&pf->hw, &tmp_add_list); 2442 2443 if (status) 2444 dev_err(&pf->pdev->dev, 2445 "Failure Adding or Removing Ethertype on VSI %i error: %d\n", 2446 vsi->vsi_num, status); 2447 2448 
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); 2449 } 2450 2451 /** 2452 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling 2453 * @vsi: the VSI being configured 2454 * @tx: bool to determine Tx or Rx rule 2455 * @create: bool to determine create or remove Rule 2456 */ 2457 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) 2458 { 2459 struct ice_fltr_list_entry *list; 2460 struct ice_pf *pf = vsi->back; 2461 LIST_HEAD(tmp_add_list); 2462 enum ice_status status; 2463 2464 list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); 2465 if (!list) 2466 return; 2467 2468 list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; 2469 list->fltr_info.vsi_handle = vsi->idx; 2470 list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP; 2471 2472 if (tx) { 2473 list->fltr_info.fltr_act = ICE_DROP_PACKET; 2474 list->fltr_info.flag = ICE_FLTR_TX; 2475 list->fltr_info.src_id = ICE_SRC_ID_VSI; 2476 } else { 2477 list->fltr_info.fltr_act = ICE_FWD_TO_VSI; 2478 list->fltr_info.flag = ICE_FLTR_RX; 2479 list->fltr_info.src_id = ICE_SRC_ID_LPORT; 2480 } 2481 2482 INIT_LIST_HEAD(&list->list_entry); 2483 list_add(&list->list_entry, &tmp_add_list); 2484 2485 if (create) 2486 status = ice_add_eth_mac(&pf->hw, &tmp_add_list); 2487 else 2488 status = ice_remove_eth_mac(&pf->hw, &tmp_add_list); 2489 2490 if (status) 2491 dev_err(&pf->pdev->dev, 2492 "Fail %s %s LLDP rule on VSI %i error: %d\n", 2493 create ? "adding" : "removing", tx ? "TX" : "RX", 2494 vsi->vsi_num, status); 2495 2496 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); 2497 } 2498 2499 /** 2500 * ice_vsi_setup - Set up a VSI by a given type 2501 * @pf: board private structure 2502 * @pi: pointer to the port_info instance 2503 * @type: VSI type 2504 * @vf_id: defines VF ID to which this VSI connects. This field is meant to be 2505 * used only for ICE_VSI_VF VSI type. For other VSI types, should 2506 * fill-in ICE_INVAL_VFID as input. 2507 * 2508 * This allocates the sw VSI structure and its queue resources. 2509 * 2510 * Returns pointer to the successfully allocated and configured VSI sw struct on 2511 * success, NULL on failure. 2512 */ 2513 struct ice_vsi * 2514 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, 2515 enum ice_vsi_type type, u16 vf_id) 2516 { 2517 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2518 struct device *dev = &pf->pdev->dev; 2519 enum ice_status status; 2520 struct ice_vsi *vsi; 2521 int ret, i; 2522 2523 if (type == ICE_VSI_VF) 2524 vsi = ice_vsi_alloc(pf, type, vf_id); 2525 else 2526 vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID); 2527 2528 if (!vsi) { 2529 dev_err(dev, "could not allocate VSI\n"); 2530 return NULL; 2531 } 2532 2533 vsi->port_info = pi; 2534 vsi->vsw = pf->first_sw; 2535 if (vsi->type == ICE_VSI_PF) 2536 vsi->ethtype = ETH_P_PAUSE; 2537 2538 if (vsi->type == ICE_VSI_VF) 2539 vsi->vf_id = vf_id; 2540 2541 if (ice_vsi_get_qs(vsi)) { 2542 dev_err(dev, "Failed to allocate queues. 
vsi->idx = %d\n", 2543 vsi->idx); 2544 goto unroll_get_qs; 2545 } 2546 2547 /* set RSS capabilities */ 2548 ice_vsi_set_rss_params(vsi); 2549 2550 /* set TC configuration */ 2551 ice_vsi_set_tc_cfg(vsi); 2552 2553 /* create the VSI */ 2554 ret = ice_vsi_init(vsi); 2555 if (ret) 2556 goto unroll_get_qs; 2557 2558 switch (vsi->type) { 2559 case ICE_VSI_PF: 2560 ret = ice_vsi_alloc_q_vectors(vsi); 2561 if (ret) 2562 goto unroll_vsi_init; 2563 2564 ret = ice_vsi_setup_vector_base(vsi); 2565 if (ret) 2566 goto unroll_alloc_q_vector; 2567 2568 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 2569 if (ret) 2570 goto unroll_vector_base; 2571 2572 ret = ice_vsi_alloc_rings(vsi); 2573 if (ret) 2574 goto unroll_vector_base; 2575 2576 ice_vsi_map_rings_to_vectors(vsi); 2577 2578 /* Do not exit if configuring RSS had an issue, at least 2579 * receive traffic on first queue. Hence no need to capture 2580 * return value 2581 */ 2582 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2583 ice_vsi_cfg_rss_lut_key(vsi); 2584 break; 2585 case ICE_VSI_VF: 2586 /* The VF driver will take care of creating a netdev for this type and 2587 * of mapping queues to vectors through Virtchnl; the PF driver only 2588 * creates a VSI and the corresponding structures for bookkeeping 2589 * purposes 2590 */ 2591 ret = ice_vsi_alloc_q_vectors(vsi); 2592 if (ret) 2593 goto unroll_vsi_init; 2594 2595 ret = ice_vsi_alloc_rings(vsi); 2596 if (ret) 2597 goto unroll_alloc_q_vector; 2598 2599 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 2600 if (ret) 2601 goto unroll_vector_base; 2602 2603 /* Do not exit if configuring RSS had an issue, at least 2604 * receive traffic on first queue. Hence no need to capture 2605 * return value 2606 */ 2607 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2608 ice_vsi_cfg_rss_lut_key(vsi); 2609 break; 2610 case ICE_VSI_LB: 2611 ret = ice_vsi_alloc_rings(vsi); 2612 if (ret) 2613 goto unroll_vsi_init; 2614 break; 2615 default: 2616 /* clean up the resources and exit */ 2617 goto unroll_vsi_init; 2618 } 2619 2620 /* configure VSI nodes based on number of queues and TCs */ 2621 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2622 max_txqs[i] = vsi->alloc_txq; 2623 2624 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2625 max_txqs); 2626 if (status) { 2627 dev_err(&pf->pdev->dev, 2628 "VSI %d failed lan queue config, error %d\n", 2629 vsi->vsi_num, status); 2630 goto unroll_vector_base; 2631 } 2632 2633 /* Add a switch rule to drop all Tx Flow Control Frames, of lookup 2634 * type ETHERTYPE, from VSIs and restrict a malicious VF from sending 2635 * out PAUSE or PFC frames. If enabled, FW can still send FC frames. 2636 * The rule is added once for the PF VSI in order to create the appropriate 2637 * recipe, since the VSI/VSI list is ignored with the drop action... 2638 * Also add rules to handle LLDP Tx and Rx packets. Tx LLDP packets 2639 * need to be dropped so that VFs cannot send LLDP packets to reconfigure 2640 * DCB settings in the HW. Also, if the FW DCBX engine is not running, 2641 * then Rx LLDP packets need to be redirected up the stack. 
2642 */ 2643 if (!ice_is_safe_mode(pf)) { 2644 if (vsi->type == ICE_VSI_PF) { 2645 ice_vsi_add_rem_eth_mac(vsi, true); 2646 2647 /* Tx LLDP packets */ 2648 ice_cfg_sw_lldp(vsi, true, true); 2649 2650 /* Rx LLDP packets */ 2651 if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) 2652 ice_cfg_sw_lldp(vsi, false, true); 2653 } 2654 } 2655 2656 return vsi; 2657 2658 unroll_vector_base: 2659 /* reclaim SW interrupts back to the common pool */ 2660 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); 2661 pf->num_avail_sw_msix += vsi->num_q_vectors; 2662 unroll_alloc_q_vector: 2663 ice_vsi_free_q_vectors(vsi); 2664 unroll_vsi_init: 2665 ice_vsi_delete(vsi); 2666 unroll_get_qs: 2667 ice_vsi_put_qs(vsi); 2668 ice_vsi_clear(vsi); 2669 2670 return NULL; 2671 } 2672 2673 /** 2674 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW 2675 * @vsi: the VSI being cleaned up 2676 */ 2677 static void ice_vsi_release_msix(struct ice_vsi *vsi) 2678 { 2679 struct ice_pf *pf = vsi->back; 2680 struct ice_hw *hw = &pf->hw; 2681 u32 txq = 0; 2682 u32 rxq = 0; 2683 int i, q; 2684 2685 for (i = 0; i < vsi->num_q_vectors; i++) { 2686 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2687 u16 reg_idx = q_vector->reg_idx; 2688 2689 wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0); 2690 wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0); 2691 for (q = 0; q < q_vector->num_ring_tx; q++) { 2692 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); 2693 txq++; 2694 } 2695 2696 for (q = 0; q < q_vector->num_ring_rx; q++) { 2697 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); 2698 rxq++; 2699 } 2700 } 2701 2702 ice_flush(hw); 2703 } 2704 2705 /** 2706 * ice_vsi_free_irq - Free the IRQ association with the OS 2707 * @vsi: the VSI being configured 2708 */ 2709 void ice_vsi_free_irq(struct ice_vsi *vsi) 2710 { 2711 struct ice_pf *pf = vsi->back; 2712 int base = vsi->base_vector; 2713 int i; 2714 2715 if (!vsi->q_vectors || !vsi->irqs_ready) 2716 return; 2717 2718 ice_vsi_release_msix(vsi); 2719 if (vsi->type == ICE_VSI_VF) 2720 return; 2721 2722 vsi->irqs_ready = false; 2723 ice_for_each_q_vector(vsi, i) { 2724 u16 vector = i + base; 2725 int irq_num; 2726 2727 irq_num = pf->msix_entries[vector].vector; 2728 2729 /* free only the irqs that were actually requested */ 2730 if (!vsi->q_vectors[i] || 2731 !(vsi->q_vectors[i]->num_ring_tx || 2732 vsi->q_vectors[i]->num_ring_rx)) 2733 continue; 2734 2735 /* clear the affinity notifier in the IRQ descriptor */ 2736 irq_set_affinity_notifier(irq_num, NULL); 2737 2738 /* clear the affinity_mask in the IRQ descriptor */ 2739 irq_set_affinity_hint(irq_num, NULL); 2740 synchronize_irq(irq_num); 2741 devm_free_irq(&pf->pdev->dev, irq_num, 2742 vsi->q_vectors[i]); 2743 } 2744 } 2745 2746 /** 2747 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues 2748 * @vsi: the VSI having resources freed 2749 */ 2750 void ice_vsi_free_tx_rings(struct ice_vsi *vsi) 2751 { 2752 int i; 2753 2754 if (!vsi->tx_rings) 2755 return; 2756 2757 ice_for_each_txq(vsi, i) 2758 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 2759 ice_free_tx_ring(vsi->tx_rings[i]); 2760 } 2761 2762 /** 2763 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues 2764 * @vsi: the VSI having resources freed 2765 */ 2766 void ice_vsi_free_rx_rings(struct ice_vsi *vsi) 2767 { 2768 int i; 2769 2770 if (!vsi->rx_rings) 2771 return; 2772 2773 ice_for_each_rxq(vsi, i) 2774 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) 2775 ice_free_rx_ring(vsi->rx_rings[i]); 2776 } 2777 2778 /** 2779 * ice_vsi_close - Shut down a VSI 2780 * @vsi: the 
VSI being shut down 2781 */ 2782 void ice_vsi_close(struct ice_vsi *vsi) 2783 { 2784 if (!test_and_set_bit(__ICE_DOWN, vsi->state)) 2785 ice_down(vsi); 2786 2787 ice_vsi_free_irq(vsi); 2788 ice_vsi_free_tx_rings(vsi); 2789 ice_vsi_free_rx_rings(vsi); 2790 } 2791 2792 /** 2793 * ice_free_res - free a block of resources 2794 * @res: pointer to the resource 2795 * @index: starting index previously returned by ice_get_res 2796 * @id: identifier to track owner 2797 * 2798 * Returns number of resources freed 2799 */ 2800 int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) 2801 { 2802 int count = 0; 2803 int i; 2804 2805 if (!res || index >= res->end) 2806 return -EINVAL; 2807 2808 id |= ICE_RES_VALID_BIT; 2809 for (i = index; i < res->end && res->list[i] == id; i++) { 2810 res->list[i] = 0; 2811 count++; 2812 } 2813 2814 return count; 2815 } 2816 2817 /** 2818 * ice_search_res - Search the tracker for a block of resources 2819 * @res: pointer to the resource 2820 * @needed: size of the block needed 2821 * @id: identifier to track owner 2822 * 2823 * Returns the base item index of the block, or -ENOMEM for error 2824 */ 2825 static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) 2826 { 2827 int start = 0, end = 0; 2828 2829 if (needed > res->end) 2830 return -ENOMEM; 2831 2832 id |= ICE_RES_VALID_BIT; 2833 2834 do { 2835 /* skip already allocated entries */ 2836 if (res->list[end++] & ICE_RES_VALID_BIT) { 2837 start = end; 2838 if ((start + needed) > res->end) 2839 break; 2840 } 2841 2842 if (end == (start + needed)) { 2843 int i = start; 2844 2845 /* there was enough, so assign it to the requestor */ 2846 while (i != end) 2847 res->list[i++] = id; 2848 2849 return start; 2850 } 2851 } while (end < res->end); 2852 2853 return -ENOMEM; 2854 } 2855 2856 /** 2857 * ice_get_res - get a block of resources 2858 * @pf: board private structure 2859 * @res: pointer to the resource 2860 * @needed: size of the block needed 2861 * @id: identifier to track owner 2862 * 2863 * Returns the base item index of the block, or negative for error 2864 */ 2865 int 2866 ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) 2867 { 2868 if (!res || !pf) 2869 return -EINVAL; 2870 2871 if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { 2872 dev_err(&pf->pdev->dev, 2873 "param err: needed=%d, num_entries = %d id=0x%04x\n", 2874 needed, res->num_entries, id); 2875 return -EINVAL; 2876 } 2877 2878 return ice_search_res(res, needed, id); 2879 } 2880 2881 /** 2882 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI 2883 * @vsi: the VSI being un-configured 2884 */ 2885 void ice_vsi_dis_irq(struct ice_vsi *vsi) 2886 { 2887 int base = vsi->base_vector; 2888 struct ice_pf *pf = vsi->back; 2889 struct ice_hw *hw = &pf->hw; 2890 u32 val; 2891 int i; 2892 2893 /* disable interrupt causation from each queue */ 2894 if (vsi->tx_rings) { 2895 ice_for_each_txq(vsi, i) { 2896 if (vsi->tx_rings[i]) { 2897 u16 reg; 2898 2899 reg = vsi->tx_rings[i]->reg_idx; 2900 val = rd32(hw, QINT_TQCTL(reg)); 2901 val &= ~QINT_TQCTL_CAUSE_ENA_M; 2902 wr32(hw, QINT_TQCTL(reg), val); 2903 } 2904 } 2905 } 2906 2907 if (vsi->rx_rings) { 2908 ice_for_each_rxq(vsi, i) { 2909 if (vsi->rx_rings[i]) { 2910 u16 reg; 2911 2912 reg = vsi->rx_rings[i]->reg_idx; 2913 val = rd32(hw, QINT_RQCTL(reg)); 2914 val &= ~QINT_RQCTL_CAUSE_ENA_M; 2915 wr32(hw, QINT_RQCTL(reg), val); 2916 } 2917 } 2918 } 2919 2920 /* disable each interrupt */ 2921 ice_for_each_q_vector(vsi, i) { 2922 if 
(!vsi->q_vectors[i]) 2923 continue; 2924 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); 2925 } 2926 2927 ice_flush(hw); 2928 2929 /* don't call synchronize_irq() for VF's from the host */ 2930 if (vsi->type == ICE_VSI_VF) 2931 return; 2932 2933 ice_for_each_q_vector(vsi, i) 2934 synchronize_irq(pf->msix_entries[i + base].vector); 2935 } 2936 2937 /** 2938 * ice_napi_del - Remove NAPI handler for the VSI 2939 * @vsi: VSI for which NAPI handler is to be removed 2940 */ 2941 void ice_napi_del(struct ice_vsi *vsi) 2942 { 2943 int v_idx; 2944 2945 if (!vsi->netdev) 2946 return; 2947 2948 ice_for_each_q_vector(vsi, v_idx) 2949 netif_napi_del(&vsi->q_vectors[v_idx]->napi); 2950 } 2951 2952 /** 2953 * ice_vsi_release - Delete a VSI and free its resources 2954 * @vsi: the VSI being removed 2955 * 2956 * Returns 0 on success or < 0 on error 2957 */ 2958 int ice_vsi_release(struct ice_vsi *vsi) 2959 { 2960 struct ice_pf *pf; 2961 2962 if (!vsi->back) 2963 return -ENODEV; 2964 pf = vsi->back; 2965 2966 /* do not unregister while driver is in the reset recovery pending 2967 * state. Since reset/rebuild happens through PF service task workqueue, 2968 * it's not a good idea to unregister netdev that is associated to the 2969 * PF that is running the work queue items currently. This is done to 2970 * avoid check_flush_dependency() warning on this wq 2971 */ 2972 if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) 2973 unregister_netdev(vsi->netdev); 2974 2975 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2976 ice_rss_clean(vsi); 2977 2978 /* Disable VSI and free resources */ 2979 if (vsi->type != ICE_VSI_LB) 2980 ice_vsi_dis_irq(vsi); 2981 ice_vsi_close(vsi); 2982 2983 /* SR-IOV determines needed MSIX resources all at once instead of per 2984 * VSI since when VFs are spawned we know how many VFs there are and how 2985 * many interrupts each VF needs. SR-IOV MSIX resources are also 2986 * cleared in the same manner. 2987 */ 2988 if (vsi->type != ICE_VSI_VF) { 2989 /* reclaim SW interrupts back to the common pool */ 2990 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); 2991 pf->num_avail_sw_msix += vsi->num_q_vectors; 2992 } 2993 2994 if (!ice_is_safe_mode(pf)) { 2995 if (vsi->type == ICE_VSI_PF) { 2996 ice_vsi_add_rem_eth_mac(vsi, false); 2997 ice_cfg_sw_lldp(vsi, true, false); 2998 /* The Rx rule will only exist to remove if the LLDP FW 2999 * engine is currently stopped 3000 */ 3001 if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) 3002 ice_cfg_sw_lldp(vsi, false, false); 3003 } 3004 } 3005 3006 ice_remove_vsi_fltr(&pf->hw, vsi->idx); 3007 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); 3008 ice_vsi_delete(vsi); 3009 ice_vsi_free_q_vectors(vsi); 3010 3011 /* make sure unregister_netdev() was called by checking __ICE_DOWN */ 3012 if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) { 3013 free_netdev(vsi->netdev); 3014 vsi->netdev = NULL; 3015 } 3016 3017 ice_vsi_clear_rings(vsi); 3018 3019 ice_vsi_put_qs(vsi); 3020 3021 /* retain SW VSI data structure since it is needed to unregister and 3022 * free VSI netdev when PF is not in reset recovery pending state,\ 3023 * for ex: during rmmod. 
3024 */ 3025 if (!ice_is_reset_in_progress(pf->state)) 3026 ice_vsi_clear(vsi); 3027 3028 return 0; 3029 } 3030 3031 /** 3032 * ice_vsi_rebuild - Rebuild VSI after reset 3033 * @vsi: VSI to be rebuilt 3034 * 3035 * Returns 0 on success and a negative value on failure 3036 */ 3037 int ice_vsi_rebuild(struct ice_vsi *vsi) 3038 { 3039 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 3040 struct ice_vf *vf = NULL; 3041 enum ice_status status; 3042 struct ice_pf *pf; 3043 int ret, i; 3044 3045 if (!vsi) 3046 return -EINVAL; 3047 3048 pf = vsi->back; 3049 if (vsi->type == ICE_VSI_VF) 3050 vf = &pf->vf[vsi->vf_id]; 3051 3052 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); 3053 ice_vsi_free_q_vectors(vsi); 3054 3055 /* SR-IOV determines needed MSIX resources all at once instead of per 3056 * VSI since when VFs are spawned we know how many VFs there are and how 3057 * many interrupts each VF needs. SR-IOV MSIX resources are also 3058 * cleared in the same manner. 3059 */ 3060 if (vsi->type != ICE_VSI_VF) { 3061 /* reclaim SW interrupts back to the common pool */ 3062 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); 3063 pf->num_avail_sw_msix += vsi->num_q_vectors; 3064 vsi->base_vector = 0; 3065 } 3066 3067 ice_vsi_put_qs(vsi); 3068 ice_vsi_clear_rings(vsi); 3069 ice_vsi_free_arrays(vsi); 3070 ice_dev_onetime_setup(&pf->hw); 3071 if (vsi->type == ICE_VSI_VF) 3072 ice_vsi_set_num_qs(vsi, vf->vf_id); 3073 else 3074 ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); 3075 3076 ret = ice_vsi_alloc_arrays(vsi); 3077 if (ret < 0) 3078 goto err_vsi; 3079 3080 ice_vsi_get_qs(vsi); 3081 ice_vsi_set_tc_cfg(vsi); 3082 3083 /* Initialize VSI struct elements and create VSI in FW */ 3084 ret = ice_vsi_init(vsi); 3085 if (ret < 0) 3086 goto err_vsi; 3087 3088 3089 switch (vsi->type) { 3090 case ICE_VSI_PF: 3091 ret = ice_vsi_alloc_q_vectors(vsi); 3092 if (ret) 3093 goto err_rings; 3094 3095 ret = ice_vsi_setup_vector_base(vsi); 3096 if (ret) 3097 goto err_vectors; 3098 3099 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 3100 if (ret) 3101 goto err_vectors; 3102 3103 ret = ice_vsi_alloc_rings(vsi); 3104 if (ret) 3105 goto err_vectors; 3106 3107 ice_vsi_map_rings_to_vectors(vsi); 3108 /* Do not exit if configuring RSS had an issue, at least 3109 * receive traffic on first queue. 
Hence no need to capture 3110 * return value 3111 */ 3112 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 3113 ice_vsi_cfg_rss_lut_key(vsi); 3114 break; 3115 case ICE_VSI_VF: 3116 ret = ice_vsi_alloc_q_vectors(vsi); 3117 if (ret) 3118 goto err_rings; 3119 3120 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 3121 if (ret) 3122 goto err_vectors; 3123 3124 ret = ice_vsi_alloc_rings(vsi); 3125 if (ret) 3126 goto err_vectors; 3127 3128 break; 3129 default: 3130 break; 3131 } 3132 3133 /* configure VSI nodes based on number of queues and TCs */ 3134 for (i = 0; i < vsi->tc_cfg.numtc; i++) 3135 max_txqs[i] = vsi->alloc_txq; 3136 3137 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 3138 max_txqs); 3139 if (status) { 3140 dev_err(&pf->pdev->dev, 3141 "VSI %d failed lan queue config, error %d\n", 3142 vsi->vsi_num, status); 3143 goto err_vectors; 3144 } 3145 return 0; 3146 3147 err_vectors: 3148 ice_vsi_free_q_vectors(vsi); 3149 err_rings: 3150 if (vsi->netdev) { 3151 vsi->current_netdev_flags = 0; 3152 unregister_netdev(vsi->netdev); 3153 free_netdev(vsi->netdev); 3154 vsi->netdev = NULL; 3155 } 3156 err_vsi: 3157 ice_vsi_clear(vsi); 3158 set_bit(__ICE_RESET_FAILED, pf->state); 3159 return ret; 3160 } 3161 3162 /** 3163 * ice_is_reset_in_progress - check for a reset in progress 3164 * @state: PF state field 3165 */ 3166 bool ice_is_reset_in_progress(unsigned long *state) 3167 { 3168 return test_bit(__ICE_RESET_OICR_RECV, state) || 3169 test_bit(__ICE_PFR_REQ, state) || 3170 test_bit(__ICE_CORER_REQ, state) || 3171 test_bit(__ICE_GLOBR_REQ, state); 3172 } 3173 3174 #ifdef CONFIG_DCB 3175 /** 3176 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map 3177 * @vsi: VSI being configured 3178 * @ctx: the context buffer returned from AQ VSI update command 3179 */ 3180 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) 3181 { 3182 vsi->info.mapping_flags = ctx->info.mapping_flags; 3183 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, 3184 sizeof(vsi->info.q_mapping)); 3185 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, 3186 sizeof(vsi->info.tc_mapping)); 3187 } 3188 3189 /** 3190 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map 3191 * @vsi: VSI to be configured 3192 * @ena_tc: TC bitmap 3193 * 3194 * VSI queues are expected to be quiesced before calling this function 3195 */ 3196 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) 3197 { 3198 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 3199 struct ice_vsi_ctx *ctx; 3200 struct ice_pf *pf = vsi->back; 3201 enum ice_status status; 3202 int i, ret = 0; 3203 u8 num_tc = 0; 3204 3205 ice_for_each_traffic_class(i) { 3206 /* build bitmap of enabled TCs */ 3207 if (ena_tc & BIT(i)) 3208 num_tc++; 3209 /* populate max_txqs per TC */ 3210 max_txqs[i] = vsi->alloc_txq; 3211 } 3212 3213 vsi->tc_cfg.ena_tc = ena_tc; 3214 vsi->tc_cfg.numtc = num_tc; 3215 3216 ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL); 3217 if (!ctx) 3218 return -ENOMEM; 3219 3220 ctx->vf_num = 0; 3221 ctx->info = vsi->info; 3222 3223 ice_vsi_setup_q_map(vsi, ctx); 3224 3225 /* must indicate which sections of the VSI context are being modified */ 3226 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); 3227 status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); 3228 if (status) { 3229 dev_info(&pf->pdev->dev, "Failed VSI Update\n"); 3230 ret = -EIO; 3231 goto out; 3232 } 3233 3234 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 3235 max_txqs); 3236 3237 if (status) { 3238 
dev_err(&pf->pdev->dev, 3239 "VSI %d failed TC config, error %d\n", 3240 vsi->vsi_num, status); 3241 ret = -EIO; 3242 goto out; 3243 } 3244 ice_vsi_update_q_map(vsi, ctx); 3245 vsi->info.valid_sections = 0; 3246 3247 ice_vsi_cfg_netdev_tc(vsi, ena_tc); 3248 out: 3249 devm_kfree(&pf->pdev->dev, ctx); 3250 return ret; 3251 } 3252 #endif /* CONFIG_DCB */ 3253 3254 /** 3255 * ice_nvm_version_str - format the NVM version string 3256 * @hw: ptr to the hardware info 3257 */ 3258 char *ice_nvm_version_str(struct ice_hw *hw) 3259 { 3260 u8 oem_ver, oem_patch, ver_hi, ver_lo; 3261 static char buf[ICE_NVM_VER_LEN]; 3262 u16 oem_build; 3263 3264 ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi, 3265 &ver_lo); 3266 3267 snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo, 3268 hw->nvm.eetrack, oem_ver, oem_build, oem_patch); 3269 3270 return buf; 3271 } 3272 3273 /** 3274 * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI 3275 * @vsi: the VSI whose MAC filter is being configured 3276 * @macaddr: the MAC address to be added or removed 3277 * @set: Add or delete a MAC filter 3278 * 3279 * Adds or removes a MAC address filter entry for a VF VSI 3280 */ 3281 enum ice_status 3282 ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set) 3283 { 3284 LIST_HEAD(tmp_add_list); 3285 enum ice_status status; 3286 3287 /* Update MAC filter list to be added or removed for a VSI */ 3288 if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) { 3289 status = ICE_ERR_NO_MEMORY; 3290 goto cfg_mac_fltr_exit; 3291 } 3292 3293 if (set) 3294 status = ice_add_mac(&vsi->back->hw, &tmp_add_list); 3295 else 3296 status = ice_remove_mac(&vsi->back->hw, &tmp_add_list); 3297 3298 cfg_mac_fltr_exit: 3299 ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list); 3300 return status; 3301 } 3302
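/* Illustrative sketch: ice_intrl_usec_to_reg() above divides the requested
 * interrupt rate limit in usecs by the hardware granularity and, when the
 * result is non-zero, ORs in an enable bit. The standalone program below
 * mirrors that arithmetic; the enable-bit position and the 2 usec granularity
 * are assumptions made only for this example, not values taken from the
 * driver headers.
 */
#include <stdint.h>
#include <stdio.h>

#define INTRL_ENA_BIT	(1u << 6)	/* assumed enable-bit position */

static uint32_t intrl_usec_to_reg(uint8_t intrl, uint8_t gran)
{
	uint32_t val = intrl / gran;

	/* a request below one granularity unit leaves rate limiting off */
	return val ? (val | INTRL_ENA_BIT) : 0;
}

int main(void)
{
	/* 10 usecs at 2 usec granularity -> count of 5 with the enable bit set */
	printf("0x%x\n", intrl_usec_to_reg(10, 2));
	/* 1 usec rounds down to 0 -> rate limiting stays disabled */
	printf("0x%x\n", intrl_usec_to_reg(1, 2));
	return 0;
}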
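/* Illustrative sketch: ice_search_res() and ice_free_res() above treat the
 * interrupt resource tracker as a flat array in which an owned entry holds
 * the owner ID with a "valid" bit set and a free entry holds zero; allocation
 * is a first-fit scan for a contiguous run of free entries. The standalone
 * model below follows the same logic; the array size, bit value, function
 * names, and -1 error return are assumptions used only for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define RES_VALID_BIT	0x8000
#define RES_ENTRIES	16

static uint16_t res_list[RES_ENTRIES];

/* return the base index of a free block of @needed entries, or -1 */
static int res_search(uint16_t needed, uint16_t id)
{
	int start = 0, end = 0;

	if (needed > RES_ENTRIES)
		return -1;

	id |= RES_VALID_BIT;
	do {
		/* restart the candidate window after an allocated entry */
		if (res_list[end++] & RES_VALID_BIT) {
			start = end;
			if (start + needed > RES_ENTRIES)
				break;
		}
		if (end == start + needed) {
			for (int i = start; i < end; i++)
				res_list[i] = id;
			return start;
		}
	} while (end < RES_ENTRIES);

	return -1;
}

/* clear the run of entries owned by @id starting at @index, return count */
static int res_free(uint16_t index, uint16_t id)
{
	int count = 0;

	id |= RES_VALID_BIT;
	for (int i = index; i < RES_ENTRIES && res_list[i] == id; i++) {
		res_list[i] = 0;
		count++;
	}
	return count;
}

int main(void)
{
	int a = res_search(4, 1);	/* expected base 0 */
	int b = res_search(3, 2);	/* expected base 4 */

	printf("a=%d b=%d freed=%d\n", a, b, res_free(a, 1));
	return 0;
}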
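/* Illustrative sketch: ice_cfg_txq_interrupt() and ice_cfg_rxq_interrupt()
 * above compose the queue interrupt cause-control value by shifting the ITR
 * index and the MSI-X vector index into their fields, masking each, and ORing
 * in the cause-enable bit before writing QINT_TQCTL/QINT_RQCTL. The shift,
 * mask, and enable values below are placeholders chosen only to make this
 * example self-contained; they are not the driver's register definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define QINT_ITR_INDX_S		11
#define QINT_ITR_INDX_M		(0x3u << QINT_ITR_INDX_S)
#define QINT_MSIX_INDX_S	0
#define QINT_MSIX_INDX_M	(0x7ffu << QINT_MSIX_INDX_S)
#define QINT_CAUSE_ENA		(1u << 30)

static uint32_t qint_ctl_val(uint16_t msix_idx, uint16_t itr_idx)
{
	uint32_t itr = ((uint32_t)itr_idx << QINT_ITR_INDX_S) & QINT_ITR_INDX_M;
	uint32_t msix = ((uint32_t)msix_idx << QINT_MSIX_INDX_S) & QINT_MSIX_INDX_M;

	return QINT_CAUSE_ENA | itr | msix;
}

int main(void)
{
	/* vector 5 within the function space, ITR index 1 */
	printf("0x%08x\n", qint_ctl_val(5, 1));
	return 0;
}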
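/* Illustrative sketch: when ice_cfg_itr() above programs GLINT_ITR, the stored
 * itr_setting (in usecs, possibly carrying a flag bit for adaptive ITR) is
 * reduced to its usec value, aligned down to the register granularity, and
 * expressed in 2 usec register units. The flag bit and granularity below are
 * assumptions for this example; the driver's ITR_TO_REG()/ITR_REG_ALIGN()
 * macros are not reproduced here.
 */
#include <stdint.h>
#include <stdio.h>

#define ITR_DYNAMIC_FLAG	0x1	/* assumed: lowest bit marks adaptive ITR */
#define ITR_GRAN_USECS		2	/* assumed register granularity */

static uint32_t itr_setting_to_reg(uint16_t itr_setting)
{
	/* strip the flag bit to get the interval in usecs */
	uint16_t usecs = itr_setting & ~ITR_DYNAMIC_FLAG;

	/* align down to the granularity, then express in granularity units */
	return (usecs & ~(ITR_GRAN_USECS - 1)) / ITR_GRAN_USECS;
}

int main(void)
{
	/* 50 usecs -> 25 register units; 51 usecs (flagged adaptive) -> same */
	printf("%u %u\n", itr_setting_to_reg(50), itr_setting_to_reg(51));
	return 0;
}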