// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
static int ice_setup_rx_ctx(struct ice_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	u32 regval;
	u16 pf_q;
	int err;

	/* Rx queue number in the global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

	rlan_ctx.base = ring->dma >> 7;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = 1;

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
	rlan_ctx.l2tsel = 1;

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers.
	 * The VLAN in the inner L2 header is stripped to the receive
	 * descriptor if enabled by this flag.
	 */
	rlan_ctx.showiv = 0;

	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
			       ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context which
	 * allows this driver to select a specific receive descriptor format
	 */
	if (vsi->type != ICE_VSI_VF) {
		regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
		regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
			QRXFLXP_CNTXT_RXDID_IDX_M;

		/* increasing context priority to pick up profile ID;
		 * default is 0x01; setting to 0x03 to ensure the profile
		 * is programmed if the previous context has the same priority
		 */
		regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
			QRXFLXP_CNTXT_RXDID_PRIO_M;

		wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
	}

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(&vsi->back->pdev->dev,
			"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);
	ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));

	return 0;
}

/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is VF number between 0-256
	 * for vmvf_type = VM, it is VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_PF:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}

/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

/**
 * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 */
static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int i, j, ret = 0;

	for (i = 0; i < vsi->num_rxq; i++) {
		int pf_q = vsi->rxq_map[i];
		u32 rx_reg;

		for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
			rx_reg = rd32(hw, QRX_CTRL(pf_q));
			if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
			    ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
				break;
			usleep_range(1000, 2000);
		}

		/* Skip if the queue is already in the requested state */
		if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
			continue;

		/* turn on/off the queue */
		if (ena)
			rx_reg |= QRX_CTRL_QENA_REQ_M;
		else
			rx_reg &= ~QRX_CTRL_QENA_REQ_M;
		wr32(hw, QRX_CTRL(pf_q), rx_reg);

		/* wait for the change to finish */
		ret = ice_pf_rxq_wait(pf, pf_q, ena);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"VSI idx %d Rx ring %d %sable timeout\n",
				vsi->idx, pf_q, (ena ?
"en" : "dis")); 231 break; 232 } 233 } 234 235 return ret; 236 } 237 238 /** 239 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI 240 * @vsi: VSI pointer 241 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. 242 * 243 * On error: returns error code (negative) 244 * On success: returns 0 245 */ 246 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors) 247 { 248 struct ice_pf *pf = vsi->back; 249 250 /* allocate memory for both Tx and Rx ring pointers */ 251 vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, 252 sizeof(*vsi->tx_rings), GFP_KERNEL); 253 if (!vsi->tx_rings) 254 goto err_txrings; 255 256 vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, 257 sizeof(*vsi->rx_rings), GFP_KERNEL); 258 if (!vsi->rx_rings) 259 goto err_rxrings; 260 261 if (alloc_qvectors) { 262 /* allocate memory for q_vector pointers */ 263 vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, 264 vsi->num_q_vectors, 265 sizeof(*vsi->q_vectors), 266 GFP_KERNEL); 267 if (!vsi->q_vectors) 268 goto err_vectors; 269 } 270 271 return 0; 272 273 err_vectors: 274 devm_kfree(&pf->pdev->dev, vsi->rx_rings); 275 err_rxrings: 276 devm_kfree(&pf->pdev->dev, vsi->tx_rings); 277 err_txrings: 278 return -ENOMEM; 279 } 280 281 /** 282 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI 283 * @vsi: the VSI being configured 284 */ 285 static void ice_vsi_set_num_desc(struct ice_vsi *vsi) 286 { 287 switch (vsi->type) { 288 case ICE_VSI_PF: 289 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; 290 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; 291 break; 292 default: 293 dev_dbg(&vsi->back->pdev->dev, 294 "Not setting number of Tx/Rx descriptors for VSI type %d\n", 295 vsi->type); 296 break; 297 } 298 } 299 300 /** 301 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI 302 * @vsi: the VSI being configured 303 * @vf_id: ID of the VF being configured 304 * 305 * Return 0 on success and a negative value on error 306 */ 307 static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) 308 { 309 struct ice_pf *pf = vsi->back; 310 311 struct ice_vf *vf = NULL; 312 313 if (vsi->type == ICE_VSI_VF) 314 vsi->vf_id = vf_id; 315 316 switch (vsi->type) { 317 case ICE_VSI_PF: 318 vsi->alloc_txq = pf->num_lan_tx; 319 vsi->alloc_rxq = pf->num_lan_rx; 320 vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); 321 break; 322 case ICE_VSI_VF: 323 vf = &pf->vf[vsi->vf_id]; 324 vsi->alloc_txq = vf->num_vf_qs; 325 vsi->alloc_rxq = vf->num_vf_qs; 326 /* pf->num_vf_msix includes (VF miscellaneous vector + 327 * data queue interrupts). Since vsi->num_q_vectors is number 328 * of queues vectors, subtract 1 from the original vector 329 * count 330 */ 331 vsi->num_q_vectors = pf->num_vf_msix - 1; 332 break; 333 default: 334 dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", 335 vsi->type); 336 break; 337 } 338 339 ice_vsi_set_num_desc(vsi); 340 } 341 342 /** 343 * ice_get_free_slot - get the next non-NULL location index in array 344 * @array: array to search 345 * @size: size of the array 346 * @curr: last known occupied index to be used as a search hint 347 * 348 * void * is being used to keep the functionality generic. This lets us use this 349 * function on any array of pointers. 
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}

/**
 * ice_vsi_delete - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
void ice_vsi_delete(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;

	ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return;

	if (vsi->type == ICE_VSI_VF)
		ctxt->vf_num = vsi->vf_id;
	ctxt->vsi_num = vsi->vsi_num;

	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));

	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
	if (status)
		dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
			vsi->vsi_num);

	devm_kfree(&pf->pdev->dev, ctxt);
}

/**
 * ice_vsi_free_arrays - clean up VSI resources
 * @vsi: pointer to VSI being cleared
 * @free_qvectors: bool to specify if q_vectors should be deallocated
 */
static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
{
	struct ice_pf *pf = vsi->back;

	/* free the ring and vector containers */
	if (free_qvectors && vsi->q_vectors) {
		devm_kfree(&pf->pdev->dev, vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	if (vsi->tx_rings) {
		devm_kfree(&pf->pdev->dev, vsi->tx_rings);
		vsi->tx_rings = NULL;
	}
	if (vsi->rx_rings) {
		devm_kfree(&pf->pdev->dev, vsi->rx_rings);
		vsi->rx_rings = NULL;
	}
}

/**
 * ice_vsi_clear - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
 *
 * Returns 0 on success, negative on failure
 */
int ice_vsi_clear(struct ice_vsi *vsi)
{
	struct ice_pf *pf = NULL;

	if (!vsi)
		return 0;

	if (!vsi->back)
		return -EINVAL;

	pf = vsi->back;

	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
		dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
			vsi->idx);
		return -EINVAL;
	}

	mutex_lock(&pf->sw_mutex);
	/* updates the PF for this cleared VSI */

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

	ice_vsi_free_arrays(vsi, true);
	mutex_unlock(&pf->sw_mutex);
	devm_kfree(&pf->pdev->dev, vsi);

	return 0;
}

/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 * @type: type of VSI
 * @vf_id: ID of the VF being configured
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
{
	struct ice_vsi *vsi = NULL;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->sw_mutex);

	/* If we have already allocated our maximum number of VSIs,
	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	 * is available to be populated
	 */
	if (pf->next_vsi == ICE_NO_VSI) {
		dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
		goto unlock_pf;
	}

	vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
	if (!vsi)
		goto unlock_pf;

	vsi->type = type;
	vsi->back = pf;
	set_bit(__ICE_DOWN, vsi->state);
	vsi->idx = pf->next_vsi;
	vsi->work_lmt = ICE_DFLT_IRQ_WORK;

	if (type == ICE_VSI_VF)
		ice_vsi_set_num_qs(vsi, vf_id);
	else
		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

	switch (vsi->type) {
	case ICE_VSI_PF:
		if (ice_vsi_alloc_arrays(vsi, true))
			goto err_rings;

		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	case ICE_VSI_VF:
		if (ice_vsi_alloc_arrays(vsi, true))
			goto err_rings;
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
		goto unlock_pf;
	}

	/* fill VSI slot in the PF struct */
	pf->vsi[pf->next_vsi] = vsi;

	/* prepare pf->next_vsi for next use */
	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
					 pf->next_vsi);
	goto unlock_pf;

err_rings:
	devm_kfree(&pf->pdev->dev, vsi);
	vsi = NULL;
unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;
}

/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}

/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
	for (index = 0; index < i; index++) {
		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}

/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for pf->vsi queues assignment
 *
 * This function first tries to find contiguous space. If it is not successful,
 * it tries with the scatter approach.
 *
 * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
 */
static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
	int ret = 0;

	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contig failed, so try with scatter approach */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
	return ret;
}

/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = ICE_MAX_TXQS,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = vsi->tx_mapping_mode
	};
	struct ice_qs_cfg rx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_rxqs,
		.pf_map_size = ICE_MAX_RXQS,
		.q_count = vsi->alloc_rxq,
		.scatter_count = ICE_MAX_SCATTER_RXQS,
		.vsi_map = vsi->rxq_map,
		.vsi_map_offset = 0,
		.mapping_mode = vsi->rx_mapping_mode
	};
	int ret = 0;

	vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
	vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;

	ret = __ice_vsi_get_qs(&tx_qs_cfg);
	if (!ret)
		ret = __ice_vsi_get_qs(&rx_qs_cfg);

	return ret;
}

/**
 * ice_vsi_put_qs - Release queues from VSI to PF
 * @vsi: the VSI that is going to release queues
 */
void ice_vsi_put_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	mutex_lock(&pf->avail_q_mutex);

	for (i = 0; i < vsi->alloc_txq; i++) {
		clear_bit(vsi->txq_map[i], pf->avail_txqs);
		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
	}

	for (i = 0; i < vsi->alloc_rxq; i++) {
		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
	}

	mutex_unlock(&pf->avail_q_mutex);
}

/**
 * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	pf = vsi->back;

	if (vsi->rss_hkey_user)
		devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
	if (vsi->rss_lut_user)
		devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = cap->rss_table_size;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* VF VSI will get a small RSS table.
		 * For VSI_LUT, LUT size should be set to 64 bytes.
		 */
		vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
			 vsi->type);
		break;
	}
}

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSI's should be allocated from shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
	 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
	 * packets untagged/tagged.
	 */
	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
				  ICE_AQ_VSI_VLAN_MODE_M) >>
				 ICE_AQ_VSI_VLAN_MODE_S);
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No Outer tag support, so outer_tag_flags remains zero */
}

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, tx_count = 0;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	u16 tx_numq_tc, rx_numq_tc;
	u16 pow = 0, max_rss = 0;
	bool ena_tc0 = false;
	u8 netdev_tc = 0;
	int i;

	/* at least TC0 should be enabled by default */
	if (vsi->tc_cfg.numtc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(0)))
			ena_tc0 = true;
	} else {
		ena_tc0 = true;
	}

	if (ena_tc0) {
		vsi->tc_cfg.numtc++;
		vsi->tc_cfg.ena_tc |= 1;
	}

	rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
	if (!rx_numq_tc)
		rx_numq_tc = 1;
	tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
	if (!tx_numq_tc)
		tx_numq_tc = 1;

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are the queue offset for TC0, and the next 4 bits
	 * are the number of queues allocated to TC0. The number of queues is
	 * a power of 2.
	 *
	 * If a TC is not enabled, the queue offset is set to 0 and one queue
	 * is allocated; this way, traffic for the given TC will be sent to
	 * the default queue.
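	 *
	 * As an illustrative example (queue counts assumed, not taken from
	 * hardware documentation): with 16 Rx queues on a single enabled TC,
	 * TC0's offset field is 0 and its 4-bit queue-count field holds
	 * order_base_2(16) = 4, i.e. 2^4 = 16 queues.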
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */

	qcount_rx = rx_numq_tc;

	/* qcount will change if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
		if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
			if (vsi->type == ICE_VSI_PF)
				max_rss = ICE_MAX_LG_RSS_QS;
			else
				max_rss = ICE_MAX_SMALL_RSS_QS;
			qcount_rx = min_t(int, rx_numq_tc, max_rss);
			qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
		}
	}

	/* find the (rounded up) power-of-2 of qcount */
	pow = order_base_2(qcount_rx);

	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
		vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			 ICE_AQ_VSI_TC_Q_NUM_M);
		offset += qcount_rx;
		tx_count += tx_numq_tc;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* if offset is non-zero, the offsets were calculated from the enabled
	 * TCs for this VSI; otherwise fall back to qcount_rx, which is always
	 * correct and non-zero because it is based on the VSI's allocated Rx
	 * queues, which is at least 1 (hence qcount_tx will be at least 1)
	 */
	if (offset)
		vsi->num_rxq = offset;
	else
		vsi->num_rxq = qcount_rx;

	vsi->num_txq = tx_count;

	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
		dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
		/* since there is a chance that num_rxq could have been changed
		 * in the above for loop, make num_txq equal to num_rxq.
		 */
		vsi->num_txq = vsi->num_rxq;
	}

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
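	 *
	 * For example (illustrative values only): a VSI whose first Rx queue
	 * maps to PF queue 32 and that owns 8 Rx queues would end up with
	 * q_mapping[0] = 32 and q_mapping[1] = 8.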
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;

	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_VF:
		/* VF VSI will get a small RSS table, which is a VSI LUT type */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	default:
		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
			 vsi->type);
		return;
	}

	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
				((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
				 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi_ctx *ctxt;
	int ret = 0;

	ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;
	switch (vsi->type) {
	case ICE_VSI_PF:
		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
		break;
	case ICE_VSI_VF:
		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
		/* VF number here is the absolute VF number (0-255) */
		ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		break;
	default:
		return -ENODEV;
	}

	ice_set_dflt_vsi_ctx(ctxt);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_set_rss_vsi_ctx(ctxt, vsi);

	ctxt->info.sw_id = vsi->port_info->sw_id;
	ice_vsi_setup_q_map(vsi, ctxt);

	/* Enable MAC Antispoof with new VSI being initialized or updated */
	if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
		ctxt->info.sec_flags |=
			ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
	}

	ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Add VSI failed, err %d\n", ret);
		return -EIO;
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt->info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt->vsi_num;

	devm_kfree(&pf->pdev->dev, ctxt);
	return ret;
}

/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_ring *ring;

	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(&vsi->back->pdev->dev,
found\n", 1031 v_idx); 1032 return; 1033 } 1034 q_vector = vsi->q_vectors[v_idx]; 1035 1036 ice_for_each_ring(ring, q_vector->tx) 1037 ring->q_vector = NULL; 1038 ice_for_each_ring(ring, q_vector->rx) 1039 ring->q_vector = NULL; 1040 1041 /* only VSI with an associated netdev is set up with NAPI */ 1042 if (vsi->netdev) 1043 netif_napi_del(&q_vector->napi); 1044 1045 devm_kfree(&vsi->back->pdev->dev, q_vector); 1046 vsi->q_vectors[v_idx] = NULL; 1047 } 1048 1049 /** 1050 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors 1051 * @vsi: the VSI having memory freed 1052 */ 1053 void ice_vsi_free_q_vectors(struct ice_vsi *vsi) 1054 { 1055 int v_idx; 1056 1057 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 1058 ice_free_q_vector(vsi, v_idx); 1059 } 1060 1061 /** 1062 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector 1063 * @vsi: the VSI being configured 1064 * @v_idx: index of the vector in the VSI struct 1065 * 1066 * We allocate one q_vector. If allocation fails we return -ENOMEM. 1067 */ 1068 static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) 1069 { 1070 struct ice_pf *pf = vsi->back; 1071 struct ice_q_vector *q_vector; 1072 1073 /* allocate q_vector */ 1074 q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL); 1075 if (!q_vector) 1076 return -ENOMEM; 1077 1078 q_vector->vsi = vsi; 1079 q_vector->v_idx = v_idx; 1080 if (vsi->type == ICE_VSI_VF) 1081 goto out; 1082 /* only set affinity_mask if the CPU is online */ 1083 if (cpu_online(v_idx)) 1084 cpumask_set_cpu(v_idx, &q_vector->affinity_mask); 1085 1086 /* This will not be called in the driver load path because the netdev 1087 * will not be created yet. All other cases with register the NAPI 1088 * handler here (i.e. resume, reset/rebuild, etc.) 1089 */ 1090 if (vsi->netdev) 1091 netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, 1092 NAPI_POLL_WEIGHT); 1093 1094 out: 1095 /* tie q_vector and VSI together */ 1096 vsi->q_vectors[v_idx] = q_vector; 1097 1098 return 0; 1099 } 1100 1101 /** 1102 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors 1103 * @vsi: the VSI being configured 1104 * 1105 * We allocate one q_vector per queue interrupt. If allocation fails we 1106 * return -ENOMEM. 1107 */ 1108 static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) 1109 { 1110 struct ice_pf *pf = vsi->back; 1111 int v_idx = 0, num_q_vectors; 1112 int err; 1113 1114 if (vsi->q_vectors[0]) { 1115 dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n", 1116 vsi->vsi_num); 1117 return -EEXIST; 1118 } 1119 1120 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { 1121 num_q_vectors = vsi->num_q_vectors; 1122 } else { 1123 err = -EINVAL; 1124 goto err_out; 1125 } 1126 1127 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 1128 err = ice_vsi_alloc_q_vector(vsi, v_idx); 1129 if (err) 1130 goto err_out; 1131 } 1132 1133 return 0; 1134 1135 err_out: 1136 while (v_idx--) 1137 ice_free_q_vector(vsi, v_idx); 1138 1139 dev_err(&pf->pdev->dev, 1140 "Failed to allocate %d q_vector for VSI %d, ret=%d\n", 1141 vsi->num_q_vectors, vsi->vsi_num, err); 1142 vsi->num_q_vectors = 0; 1143 return err; 1144 } 1145 1146 /** 1147 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI 1148 * @vsi: ptr to the VSI 1149 * 1150 * This should only be called after ice_vsi_alloc() which allocates the 1151 * corresponding SW VSI structure and initializes num_queue_pairs for the 1152 * newly allocated VSI. 
 *
 * Returns 0 on success or negative on failure
 */
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int num_q_vectors = 0;

	if (vsi->sw_base_vector || vsi->hw_base_vector) {
		dev_dbg(&pf->pdev->dev, "VSI %d has non-zero HW base vector %d or SW base vector %d\n",
			vsi->vsi_num, vsi->hw_base_vector, vsi->sw_base_vector);
		return -EEXIST;
	}

	if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		return -ENOENT;

	switch (vsi->type) {
	case ICE_VSI_PF:
		num_q_vectors = vsi->num_q_vectors;
		/* reserve slots from OS requested IRQs */
		vsi->sw_base_vector = ice_get_res(pf, pf->sw_irq_tracker,
						  num_q_vectors, vsi->idx);
		if (vsi->sw_base_vector < 0) {
			dev_err(&pf->pdev->dev,
				"Failed to get tracking for %d SW vectors for VSI %d, err=%d\n",
				num_q_vectors, vsi->vsi_num,
				vsi->sw_base_vector);
			return -ENOENT;
		}
		pf->num_avail_sw_msix -= num_q_vectors;

		/* reserve slots from HW interrupts */
		vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
						  num_q_vectors, vsi->idx);
		break;
	case ICE_VSI_VF:
		/* take VF misc vector and data vectors into account */
		num_q_vectors = pf->num_vf_msix;
		/* For VF VSI, reserve slots only from HW interrupts */
		vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
						  num_q_vectors, vsi->idx);
		break;
	default:
		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
			 vsi->type);
		break;
	}

	if (vsi->hw_base_vector < 0) {
		dev_err(&pf->pdev->dev,
			"Failed to get tracking for %d HW vectors for VSI %d, err=%d\n",
			num_q_vectors, vsi->vsi_num, vsi->hw_base_vector);
		if (vsi->type != ICE_VSI_VF) {
			ice_free_res(vsi->back->sw_irq_tracker,
				     vsi->sw_base_vector, vsi->idx);
			pf->num_avail_sw_msix += num_q_vectors;
		}
		return -ENOENT;
	}

	pf->num_avail_hw_msix -= num_q_vectors;

	return 0;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				vsi->tx_rings[i] = NULL;
			}
		}
	}
	if (vsi->rx_rings) {
		for (i = 0; i < vsi->alloc_rxq; i++) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				vsi->rx_rings[i] = NULL;
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	/* Allocate Tx rings */
	for (i = 0; i < vsi->alloc_txq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);

		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_tx_desc;
		vsi->tx_rings[i] = ring;
	}

	/* Allocate Rx rings */
	for (i = 0; i < vsi->alloc_rxq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_rx_desc;
		vsi->rx_rings[i] = ring;
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
#ifdef CONFIG_DCB
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
#else
static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
#endif /* CONFIG_DCB */
{
	int q_vectors = vsi->num_q_vectors;
	int tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initially assigning remaining rings count to VSIs num queue value */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		int tx_rings_per_v, rx_rings_per_v, q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.ring;
			q_vector->tx.ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.ring;
			q_vector->rx.ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}

/**
 * ice_vsi_manage_rss_lut - disable/enable RSS
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 *
 * In the event of disable request for RSS, this function will zero out RSS
 * LUT, while in the event of enable request for RSS, it will reconfigure RSS
 * LUT.
 */
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
	int err = 0;
	u8 *lut;

	lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size,
			   GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (ena) {
		if (vsi->rss_lut_user)
			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
		else
			ice_fill_rss_lut(lut, vsi->rss_table_size,
					 vsi->rss_size);
	}

	err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
	devm_kfree(&vsi->back->pdev->dev, lut);
	return err;
}

/**
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
 * @vsi: VSI to be configured
 */
static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
	u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
	struct ice_aqc_get_set_rss_keys *key;
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	int err = 0;
	u8 *lut;

	vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);

	lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);

	status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
				    vsi->rss_table_size);

	if (status) {
		dev_err(&vsi->back->pdev->dev,
			"set_rss_lut failed, error %d\n", status);
		err = -EIO;
		goto ice_vsi_cfg_rss_exit;
	}

	key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto ice_vsi_cfg_rss_exit;
	}

	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user,
		       ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
	else
		netdev_rss_key_fill((void *)seed,
				    ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
	memcpy(&key->standard_rss_key, seed,
	       ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);

	status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);

	if (status) {
		dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
			status);
		err = -EIO;
	}

	devm_kfree(&pf->pdev->dev, key);
ice_vsi_cfg_rss_exit:
	devm_kfree(&pf->pdev->dev, lut);
	return err;
}

/**
 * ice_add_mac_to_list - Add a MAC address filter entry to the list
 * @vsi: the VSI to be forwarded to
 * @add_list: pointer to the list which contains MAC filter entries
 * @macaddr: the MAC address to be added.
 *
 * Adds MAC address filter entry to the temp list
 *
 * Returns 0 on success or ENOMEM on failure.
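 *
 * Callers typically build up a temporary list with this helper, hand the
 * whole list to the switch filter add code in a single call, and then free
 * it with ice_free_fltr_list() (defined later in this file).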
 */
int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
			const u8 *macaddr)
{
	struct ice_fltr_list_entry *tmp;
	struct ice_pf *pf = vsi->back;

	tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info.flag = ICE_FLTR_TX;
	tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
	tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi->idx;
	ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);

	INIT_LIST_HEAD(&tmp->list_entry);
	list_add(&tmp->list_entry, add_list);

	return 0;
}

/**
 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 */
void ice_update_eth_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *prev_es, *cur_es;
	struct ice_hw *hw = &vsi->back->hw;
	u16 vsi_num = vsi->vsi_num;	/* HW absolute index of a VSI */

	prev_es = &vsi->eth_stats_prev;
	cur_es = &vsi->eth_stats;

	ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->rx_bytes,
			  &cur_es->rx_bytes);

	ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->rx_unicast,
			  &cur_es->rx_unicast);

	ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->rx_multicast,
			  &cur_es->rx_multicast);

	ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
			  &cur_es->rx_broadcast);

	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_discards, &cur_es->rx_discards);

	ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->tx_bytes,
			  &cur_es->tx_bytes);

	ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->tx_unicast,
			  &cur_es->tx_unicast);

	ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->tx_multicast,
			  &cur_es->tx_multicast);

	ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
			  &cur_es->tx_broadcast);

	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_errors, &cur_es->tx_errors);

	vsi->stat_offsets_loaded = true;
}

/**
 * ice_free_fltr_list - free filter lists helper
 * @dev: pointer to the device struct
 * @h: pointer to the list head to be freed
 *
 * Helper function to free filter lists previously created using
 * ice_add_mac_to_list
 */
void ice_free_fltr_list(struct device *dev, struct list_head *h)
{
	struct ice_fltr_list_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, h, list_entry) {
		list_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}

/**
 * ice_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be added
 */
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
{
	struct ice_fltr_list_entry *tmp;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;
	int err = 0;

	tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.flag = ICE_FLTR_TX;
	tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
	tmp->fltr_info.vsi_handle = vsi->idx;
	tmp->fltr_info.l_data.vlan.vlan_id = vid;

	INIT_LIST_HEAD(&tmp->list_entry);
	list_add(&tmp->list_entry, &tmp_add_list);

	status = ice_add_vlan(&pf->hw, &tmp_add_list);
	if (status) {
		err = -ENODEV;
		dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
			vid, vsi->vsi_num);
	}

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return err;
}

/**
 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be removed
 *
 * Returns 0 on success and negative on failure
 */
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
	struct ice_fltr_list_entry *list;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	int status = 0;

	list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	list->fltr_info.vsi_handle = vsi->idx;
	list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	list->fltr_info.l_data.vlan.vlan_id = vid;
	list->fltr_info.flag = ICE_FLTR_TX;
	list->fltr_info.src_id = ICE_SRC_ID_VSI;

	INIT_LIST_HEAD(&list->list_entry);
	list_add(&list->list_entry, &tmp_add_list);

	if (ice_remove_vlan(&pf->hw, &tmp_add_list)) {
		dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n",
			vid, vsi->vsi_num);
		status = -EIO;
	}

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return status;
}

/**
 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Rx VSI for operation.
 */
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
	int err = 0;
	u16 i;

	if (vsi->type == ICE_VSI_VF)
		goto setup_rings;

	if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
		vsi->max_frame = vsi->netdev->mtu +
			ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	else
		vsi->max_frame = ICE_RXBUF_2048;

	vsi->rx_buf_len = ICE_RXBUF_2048;
setup_rings:
	/* set up individual rings */
	for (i = 0; i < vsi->num_rxq && !err; i++)
		err = ice_setup_rx_ctx(vsi->rx_rings[i]);

	if (err) {
		dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
		return -EIO;
	}
	return err;
}

/**
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 * @rings: Tx ring array to be configured
 * @offset: offset within vsi->txq_map
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_pf *pf = vsi->back;
	u8 num_q_grps, q_idx = 0;
	enum ice_status status;
	u16 buf_len, i, pf_q;
	int err = 0, tc;

	buf_len = sizeof(*qg_buf);
	qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;
	num_q_grps = 1;

	/* set up and configure the Tx queues for each enabled TC */
	ice_for_each_traffic_class(tc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
			break;

		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
			struct ice_tlan_ctx tlan_ctx = { 0 };

			pf_q = vsi->txq_map[q_idx + offset];
			ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q);
			/* copy context contents into the qg_buf */
			qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
			ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
				    ice_tlan_ctx_info);

			/* init queue specific tail reg. It is referred as
			 * transmit comm scheduler queue doorbell.
			 */
			rings[q_idx]->tail =
				pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
			status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
						 num_q_grps, qg_buf, buf_len,
						 NULL);
			if (status) {
				dev_err(&vsi->back->pdev->dev,
					"Failed to set LAN Tx queue context, error: %d\n",
					status);
				err = -ENODEV;
				goto err_cfg_txqs;
			}

			/* Add Tx Queue TEID into the VSI Tx ring from the
			 * response. This will complete configuring and
			 * enabling the queue.
			 */
			txq = &qg_buf->txqs[0];
			if (pf_q == le16_to_cpu(txq->txq_id))
				rings[q_idx]->txq_teid =
					le32_to_cpu(txq->q_teid);

			q_idx++;
		}
	}
err_cfg_txqs:
	devm_kfree(&pf->pdev->dev, qg_buf);
	return err;
}

/**
 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
}

/**
 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
 * @intrl: interrupt rate limit in usecs
 * @gran: interrupt rate limit granularity in usecs
 *
 * This function converts a decimal interrupt rate limit in usecs to the format
 * expected by firmware.
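 *
 * Illustrative example (values assumed, not taken from the datasheet):
 * intrl = 10 usecs with gran = 2 usecs yields a register value of 10 / 2 = 5
 * with GLINT_RATE_INTRL_ENA_M set; intrl = 0 returns 0, which leaves rate
 * limiting disabled.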
 */
static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
	u32 val = intrl / gran;

	if (val)
		return val | GLINT_RATE_INTRL_ENA_M;
	return 0;
}

/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * @hw: board specific structure
 */
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
	u32 regval = rd32(hw, GLINT_CTL);

	/* no need to update global register if ITR gran is already set */
	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
	    (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
	      GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
	      GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
	      GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
	      GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
		return;

	regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
		  GLINT_CTL_ITR_GRAN_200_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
		  GLINT_CTL_ITR_GRAN_100_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
		  GLINT_CTL_ITR_GRAN_50_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
		  GLINT_CTL_ITR_GRAN_25_M);
	wr32(hw, GLINT_CTL, regval);
}

/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 * @vector: HW vector index to apply the interrupt throttling to
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in.
 */
static void
ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
{
	ice_cfg_itr_gran(hw);

	if (q_vector->num_ring_rx) {
		struct ice_ring_container *rc = &q_vector->rx;

		/* if this value is set then don't overwrite with default */
		if (!rc->itr_setting)
			rc->itr_setting = ICE_DFLT_RX_ITR;

		rc->target_itr = ITR_TO_REG(rc->itr_setting);
		rc->next_update = jiffies + 1;
		rc->current_itr = rc->target_itr;
		wr32(hw, GLINT_ITR(rc->itr_idx, vector),
		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
	}

	if (q_vector->num_ring_tx) {
		struct ice_ring_container *rc = &q_vector->tx;

		/* if this value is set then don't overwrite with default */
		if (!rc->itr_setting)
			rc->itr_setting = ICE_DFLT_TX_ITR;

		rc->target_itr = ITR_TO_REG(rc->itr_setting);
		rc->next_update = jiffies + 1;
		rc->current_itr = rc->target_itr;
		wr32(hw, GLINT_ITR(rc->itr_idx, vector),
		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
	}
}

/**
 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 */
void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u16 vector = vsi->hw_base_vector;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0, rxq = 0;
	int i, q;

	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		ice_cfg_itr(hw, q_vector, vector);

		wr32(hw, GLINT_RATE(vector),
		     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));

		/* Both the Transmit Queue Interrupt Cause Control register
		 * and the Receive Queue Interrupt Cause Control register
		 * expect the MSIX_INDX field to be the vector index
		 * within the function space and not the absolute
		 * vector index across PF or across device.
		 * For SR-IOV VF VSIs queue vector index always starts
		 * with 1 since first vector index(0) is used for OICR
		 * in VF space. Since VMDq and other PF VSIs are within
		 * the PF function space, use the vector index that is
		 * tracked for this PF.
		 */
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			int itr_idx = q_vector->tx.itr_idx;
			u32 val;

			if (vsi->type == ICE_VSI_VF)
				val = QINT_TQCTL_CAUSE_ENA_M |
				      (itr_idx << QINT_TQCTL_ITR_INDX_S) |
				      ((i + 1) << QINT_TQCTL_MSIX_INDX_S);
			else
				val = QINT_TQCTL_CAUSE_ENA_M |
				      (itr_idx << QINT_TQCTL_ITR_INDX_S) |
				      (vector << QINT_TQCTL_MSIX_INDX_S);
			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			int itr_idx = q_vector->rx.itr_idx;
			u32 val;

			if (vsi->type == ICE_VSI_VF)
				val = QINT_RQCTL_CAUSE_ENA_M |
				      (itr_idx << QINT_RQCTL_ITR_INDX_S) |
				      ((i + 1) << QINT_RQCTL_MSIX_INDX_S);
			else
				val = QINT_RQCTL_CAUSE_ENA_M |
				      (itr_idx << QINT_RQCTL_ITR_INDX_S) |
				      (vector << QINT_RQCTL_MSIX_INDX_S);
			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
			rxq++;
		}
	}

	ice_flush(hw);
}

/**
 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
 * @vsi: the VSI being changed
 */
int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	/* Here we are configuring the VSI to let the driver add VLAN tags by
	 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
	 * insertion happens in the Tx hot path, in ice_tx_map.
	 */
	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;

	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = ctxt->info.vlan_flags;
out:
	devm_kfree(dev, ctxt);
	return ret;
}

/**
 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 */
int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	/* Here we are configuring what the VSI should do with the VLAN tag in
	 * the Rx packet. We can either leave the tag in the packet or put it in
	 * the Rx descriptor.
	 */
	if (ena)
		/* Strip VLAN tag from Rx packet and put it in the desc */
		ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
	else
		/* Disable stripping. Leave tag in packet */
Leave tag in packet */ 1979 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; 1980 1981 /* Allow all packets untagged/tagged */ 1982 ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; 1983 1984 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 1985 1986 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 1987 if (status) { 1988 dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n", 1989 ena, status, hw->adminq.sq_last_status); 1990 ret = -EIO; 1991 goto out; 1992 } 1993 1994 vsi->info.vlan_flags = ctxt->info.vlan_flags; 1995 out: 1996 devm_kfree(dev, ctxt); 1997 return ret; 1998 } 1999 2000 /** 2001 * ice_vsi_start_rx_rings - start VSI's Rx rings 2002 * @vsi: the VSI whose rings are to be started 2003 * 2004 * Returns 0 on success and a negative value on error 2005 */ 2006 int ice_vsi_start_rx_rings(struct ice_vsi *vsi) 2007 { 2008 return ice_vsi_ctrl_rx_rings(vsi, true); 2009 } 2010 2011 /** 2012 * ice_vsi_stop_rx_rings - stop VSI's Rx rings 2013 * @vsi: the VSI 2014 * 2015 * Returns 0 on success and a negative value on error 2016 */ 2017 int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) 2018 { 2019 return ice_vsi_ctrl_rx_rings(vsi, false); 2020 } 2021 2022 /** 2023 * ice_vsi_stop_tx_rings - Disable Tx rings 2024 * @vsi: the VSI being configured 2025 * @rst_src: reset source 2026 * @rel_vmvf_num: Relative ID of VF/VM 2027 * @rings: Tx ring array to be stopped 2028 * @offset: offset within vsi->txq_map 2029 */ 2030 static int 2031 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2032 u16 rel_vmvf_num, struct ice_ring **rings, int offset) 2033 { 2034 struct ice_pf *pf = vsi->back; 2035 struct ice_hw *hw = &pf->hw; 2036 enum ice_status status; 2037 u32 *q_teids, val; 2038 u16 *q_ids, i; 2039 int err = 0; 2040 2041 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) 2042 return -EINVAL; 2043 2044 q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), 2045 GFP_KERNEL); 2046 if (!q_teids) 2047 return -ENOMEM; 2048 2049 q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), 2050 GFP_KERNEL); 2051 if (!q_ids) { 2052 err = -ENOMEM; 2053 goto err_alloc_q_ids; 2054 } 2055 2056 /* set up the Tx queue list to be disabled */ 2057 ice_for_each_txq(vsi, i) { 2058 u16 v_idx; 2059 2060 if (!rings || !rings[i] || !rings[i]->q_vector) { 2061 err = -EINVAL; 2062 goto err_out; 2063 } 2064 2065 q_ids[i] = vsi->txq_map[i + offset]; 2066 q_teids[i] = rings[i]->txq_teid; 2067 2068 /* clear cause_ena bit for disabled queues */ 2069 val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx)); 2070 val &= ~QINT_TQCTL_CAUSE_ENA_M; 2071 wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val); 2072 2073 /* software is expected to wait for 100 ns */ 2074 ndelay(100); 2075 2076 /* trigger a software interrupt for the vector associated to 2077 * the queue to schedule NAPI handler 2078 */ 2079 v_idx = rings[i]->q_vector->v_idx; 2080 wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx), 2081 GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); 2082 } 2083 status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, 2084 rst_src, rel_vmvf_num, NULL); 2085 /* if the disable queue command was exercised during an active reset 2086 * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as 2087 * the reset operation disables queues at the hardware level anyway. 2088 */ 2089 if (status == ICE_ERR_RESET_ONGOING) { 2090 dev_info(&pf->pdev->dev, 2091 "Reset in progress. 
LAN Tx queues already disabled\n"); 2092 } else if (status) { 2093 dev_err(&pf->pdev->dev, 2094 "Failed to disable LAN Tx queues, error: %d\n", 2095 status); 2096 err = -ENODEV; 2097 } 2098 2099 err_out: 2100 devm_kfree(&pf->pdev->dev, q_ids); 2101 2102 err_alloc_q_ids: 2103 devm_kfree(&pf->pdev->dev, q_teids); 2104 2105 return err; 2106 } 2107 2108 /** 2109 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings 2110 * @vsi: the VSI being configured 2111 * @rst_src: reset source 2112 * @rel_vmvf_num: Relative ID of VF/VM 2113 */ 2114 int 2115 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2116 u16 rel_vmvf_num) 2117 { 2118 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, 2119 0); 2120 } 2121 2122 /** 2123 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI 2124 * @vsi: VSI to enable or disable VLAN pruning on 2125 * @ena: set to true to enable VLAN pruning and false to disable it 2126 * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode 2127 * 2128 * returns 0 if VSI is updated, negative otherwise 2129 */ 2130 int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) 2131 { 2132 struct ice_vsi_ctx *ctxt; 2133 struct device *dev; 2134 int status; 2135 2136 if (!vsi) 2137 return -EINVAL; 2138 2139 dev = &vsi->back->pdev->dev; 2140 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); 2141 if (!ctxt) 2142 return -ENOMEM; 2143 2144 ctxt->info = vsi->info; 2145 2146 if (ena) { 2147 ctxt->info.sec_flags |= 2148 ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 2149 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S; 2150 ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 2151 } else { 2152 ctxt->info.sec_flags &= 2153 ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 2154 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 2155 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 2156 } 2157 2158 if (!vlan_promisc) 2159 ctxt->info.valid_sections = 2160 cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID | 2161 ICE_AQ_VSI_PROP_SW_VALID); 2162 2163 status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL); 2164 if (status) { 2165 netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n", 2166 ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status, 2167 vsi->back->hw.adminq.sq_last_status); 2168 goto err_out; 2169 } 2170 2171 vsi->info.sec_flags = ctxt->info.sec_flags; 2172 vsi->info.sw_flags2 = ctxt->info.sw_flags2; 2173 2174 devm_kfree(dev, ctxt); 2175 return 0; 2176 2177 err_out: 2178 devm_kfree(dev, ctxt); 2179 return -EIO; 2180 } 2181 2182 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) 2183 { 2184 struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg; 2185 2186 vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg); 2187 vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg); 2188 } 2189 2190 /** 2191 * ice_vsi_setup - Set up a VSI by a given type 2192 * @pf: board private structure 2193 * @pi: pointer to the port_info instance 2194 * @type: VSI type 2195 * @vf_id: defines VF ID to which this VSI connects. This field is meant to be 2196 * used only for ICE_VSI_VF VSI type. For other VSI types, should 2197 * fill-in ICE_INVAL_VFID as input. 2198 * 2199 * This allocates the sw VSI structure and its queue resources. 2200 * 2201 * Returns pointer to the successfully allocated and configured VSI sw struct on 2202 * success, NULL on failure. 
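*
* Minimal usage sketch (illustrative only; the pf and port_info are assumed
* to come from driver probe, and a PF-type VSI is requested, so the unused
* vf_id argument is passed as ICE_INVAL_VFID):
*
*	vsi = ice_vsi_setup(pf, pf->hw.port_info, ICE_VSI_PF, ICE_INVAL_VFID);
*	if (!vsi)
*		return -ENOMEM;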
2203 */ 2204 struct ice_vsi * 2205 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, 2206 enum ice_vsi_type type, u16 vf_id) 2207 { 2208 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2209 struct device *dev = &pf->pdev->dev; 2210 struct ice_vsi *vsi; 2211 int ret, i; 2212 2213 if (type == ICE_VSI_VF) 2214 vsi = ice_vsi_alloc(pf, type, vf_id); 2215 else 2216 vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID); 2217 2218 if (!vsi) { 2219 dev_err(dev, "could not allocate VSI\n"); 2220 return NULL; 2221 } 2222 2223 vsi->port_info = pi; 2224 vsi->vsw = pf->first_sw; 2225 if (vsi->type == ICE_VSI_VF) 2226 vsi->vf_id = vf_id; 2227 2228 if (ice_vsi_get_qs(vsi)) { 2229 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", 2230 vsi->idx); 2231 goto unroll_get_qs; 2232 } 2233 2234 /* set RSS capabilities */ 2235 ice_vsi_set_rss_params(vsi); 2236 2237 /* set TC configuration */ 2238 ice_vsi_set_tc_cfg(vsi); 2239 2240 /* create the VSI */ 2241 ret = ice_vsi_init(vsi); 2242 if (ret) 2243 goto unroll_get_qs; 2244 2245 switch (vsi->type) { 2246 case ICE_VSI_PF: 2247 ret = ice_vsi_alloc_q_vectors(vsi); 2248 if (ret) 2249 goto unroll_vsi_init; 2250 2251 ret = ice_vsi_setup_vector_base(vsi); 2252 if (ret) 2253 goto unroll_alloc_q_vector; 2254 2255 ret = ice_vsi_alloc_rings(vsi); 2256 if (ret) 2257 goto unroll_vector_base; 2258 2259 ice_vsi_map_rings_to_vectors(vsi); 2260 2261 /* Do not exit if configuring RSS had an issue, at least 2262 * receive traffic on first queue. Hence no need to capture 2263 * return value 2264 */ 2265 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2266 ice_vsi_cfg_rss_lut_key(vsi); 2267 break; 2268 case ICE_VSI_VF: 2269 /* VF driver will take care of creating netdev for this type and 2270 * map queues to vectors through Virtchnl, PF driver only 2271 * creates a VSI and corresponding structures for bookkeeping 2272 * purpose 2273 */ 2274 ret = ice_vsi_alloc_q_vectors(vsi); 2275 if (ret) 2276 goto unroll_vsi_init; 2277 2278 ret = ice_vsi_alloc_rings(vsi); 2279 if (ret) 2280 goto unroll_alloc_q_vector; 2281 2282 /* Setup Vector base only during VF init phase or when VF asks 2283 * for more vectors than assigned number. In all other cases, 2284 * assign hw_base_vector to the value given earlier. 
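* (The ICE_VF_STATE_CFG_INTR bit tested below marks that init/reassignment
* case; otherwise the base vector recorded at VF init, first_vector_idx, is
* reused as hw_base_vector.)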
2285 */ 2286 if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) { 2287 ret = ice_vsi_setup_vector_base(vsi); 2288 if (ret) 2289 goto unroll_vector_base; 2290 } else { 2291 vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx; 2292 } 2293 pf->q_left_tx -= vsi->alloc_txq; 2294 pf->q_left_rx -= vsi->alloc_rxq; 2295 break; 2296 default: 2297 /* clean up the resources and exit */ 2298 goto unroll_vsi_init; 2299 } 2300 2301 /* configure VSI nodes based on number of queues and TC's */ 2302 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2303 max_txqs[i] = pf->num_lan_tx; 2304 2305 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2306 max_txqs); 2307 if (ret) { 2308 dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n"); 2309 goto unroll_vector_base; 2310 } 2311 2312 return vsi; 2313 2314 unroll_vector_base: 2315 /* reclaim SW interrupts back to the common pool */ 2316 ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); 2317 pf->num_avail_sw_msix += vsi->num_q_vectors; 2318 /* reclaim HW interrupt back to the common pool */ 2319 ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx); 2320 pf->num_avail_hw_msix += vsi->num_q_vectors; 2321 unroll_alloc_q_vector: 2322 ice_vsi_free_q_vectors(vsi); 2323 unroll_vsi_init: 2324 ice_vsi_delete(vsi); 2325 unroll_get_qs: 2326 ice_vsi_put_qs(vsi); 2327 pf->q_left_tx += vsi->alloc_txq; 2328 pf->q_left_rx += vsi->alloc_rxq; 2329 ice_vsi_clear(vsi); 2330 2331 return NULL; 2332 } 2333 2334 /** 2335 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW 2336 * @vsi: the VSI being cleaned up 2337 */ 2338 static void ice_vsi_release_msix(struct ice_vsi *vsi) 2339 { 2340 struct ice_pf *pf = vsi->back; 2341 u16 vector = vsi->hw_base_vector; 2342 struct ice_hw *hw = &pf->hw; 2343 u32 txq = 0; 2344 u32 rxq = 0; 2345 int i, q; 2346 2347 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { 2348 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2349 2350 wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0); 2351 wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0); 2352 for (q = 0; q < q_vector->num_ring_tx; q++) { 2353 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); 2354 txq++; 2355 } 2356 2357 for (q = 0; q < q_vector->num_ring_rx; q++) { 2358 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); 2359 rxq++; 2360 } 2361 } 2362 2363 ice_flush(hw); 2364 } 2365 2366 /** 2367 * ice_vsi_free_irq - Free the IRQ association with the OS 2368 * @vsi: the VSI being configured 2369 */ 2370 void ice_vsi_free_irq(struct ice_vsi *vsi) 2371 { 2372 struct ice_pf *pf = vsi->back; 2373 int base = vsi->sw_base_vector; 2374 2375 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { 2376 int i; 2377 2378 if (!vsi->q_vectors || !vsi->irqs_ready) 2379 return; 2380 2381 ice_vsi_release_msix(vsi); 2382 if (vsi->type == ICE_VSI_VF) 2383 return; 2384 2385 vsi->irqs_ready = false; 2386 for (i = 0; i < vsi->num_q_vectors; i++) { 2387 u16 vector = i + base; 2388 int irq_num; 2389 2390 irq_num = pf->msix_entries[vector].vector; 2391 2392 /* free only the irqs that were actually requested */ 2393 if (!vsi->q_vectors[i] || 2394 !(vsi->q_vectors[i]->num_ring_tx || 2395 vsi->q_vectors[i]->num_ring_rx)) 2396 continue; 2397 2398 /* clear the affinity notifier in the IRQ descriptor */ 2399 irq_set_affinity_notifier(irq_num, NULL); 2400 2401 /* clear the affinity_mask in the IRQ descriptor */ 2402 irq_set_affinity_hint(irq_num, NULL); 2403 synchronize_irq(irq_num); 2404 devm_free_irq(&pf->pdev->dev, irq_num, 2405 vsi->q_vectors[i]); 2406 } 2407 } 2408 } 2409 2410 /** 2411 
* ice_vsi_free_tx_rings - Free Tx resources for VSI queues 2412 * @vsi: the VSI having resources freed 2413 */ 2414 void ice_vsi_free_tx_rings(struct ice_vsi *vsi) 2415 { 2416 int i; 2417 2418 if (!vsi->tx_rings) 2419 return; 2420 2421 ice_for_each_txq(vsi, i) 2422 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 2423 ice_free_tx_ring(vsi->tx_rings[i]); 2424 } 2425 2426 /** 2427 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues 2428 * @vsi: the VSI having resources freed 2429 */ 2430 void ice_vsi_free_rx_rings(struct ice_vsi *vsi) 2431 { 2432 int i; 2433 2434 if (!vsi->rx_rings) 2435 return; 2436 2437 ice_for_each_rxq(vsi, i) 2438 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) 2439 ice_free_rx_ring(vsi->rx_rings[i]); 2440 } 2441 2442 /** 2443 * ice_vsi_close - Shut down a VSI 2444 * @vsi: the VSI being shut down 2445 */ 2446 void ice_vsi_close(struct ice_vsi *vsi) 2447 { 2448 if (!test_and_set_bit(__ICE_DOWN, vsi->state)) 2449 ice_down(vsi); 2450 2451 ice_vsi_free_irq(vsi); 2452 ice_vsi_free_tx_rings(vsi); 2453 ice_vsi_free_rx_rings(vsi); 2454 } 2455 2456 /** 2457 * ice_free_res - free a block of resources 2458 * @res: pointer to the resource 2459 * @index: starting index previously returned by ice_get_res 2460 * @id: identifier to track owner 2461 * 2462 * Returns the number of resources freed, or -EINVAL when the parameters are invalid 2463 */ 2464 int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) 2465 { 2466 int count = 0; 2467 int i; 2468 2469 if (!res || index >= res->num_entries) 2470 return -EINVAL; 2471 2472 id |= ICE_RES_VALID_BIT; 2473 for (i = index; i < res->num_entries && res->list[i] == id; i++) { 2474 res->list[i] = 0; 2475 count++; 2476 } 2477 2478 return count; 2479 } 2480 2481 /** 2482 * ice_search_res - Search the tracker for a block of resources 2483 * @res: pointer to the resource 2484 * @needed: size of the block needed 2485 * @id: identifier to track owner 2486 * 2487 * Returns the base item index of the block, or -ENOMEM for error 2488 */ 2489 static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) 2490 { 2491 int start = res->search_hint; 2492 int end = start; 2493 2494 if ((start + needed) > res->num_entries) 2495 return -ENOMEM; 2496 2497 id |= ICE_RES_VALID_BIT; 2498 2499 do { 2500 /* skip already allocated entries */ 2501 if (res->list[end++] & ICE_RES_VALID_BIT) { 2502 start = end; 2503 if ((start + needed) > res->num_entries) 2504 break; 2505 } 2506 2507 if (end == (start + needed)) { 2508 int i = start; 2509 2510 /* there was enough, so assign it to the requestor */ 2511 while (i != end) 2512 res->list[i++] = id; 2513 2514 if (end == res->num_entries) 2515 end = 0; 2516 2517 res->search_hint = end; 2518 return start; 2519 } 2520 } while (1); 2521 2522 return -ENOMEM; 2523 } 2524 2525 /** 2526 * ice_get_res - get a block of resources 2527 * @pf: board private structure 2528 * @res: pointer to the resource 2529 * @needed: size of the block needed 2530 * @id: identifier to track owner 2531 * 2532 * Returns the base item index of the block, or -ENOMEM for error 2533 * The search_hint trick and lack of advanced fit-finding only work 2534 * because we're highly likely to have requests that are all the same size. 2535 * Linear search time and any fragmentation should be minimal.
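*
* Illustrative pairing with ice_free_res() (a sketch, not lifted from a real
* caller; the tracker and id used here are examples): the base index returned
* by ice_get_res() is the @index that ice_free_res() later takes, together
* with the same owner @id:
*
*	base = ice_get_res(pf, pf->sw_irq_tracker, vsi->num_q_vectors, vsi->idx);
*	if (base < 0)
*		return -ENOMEM;
*	vsi->sw_base_vector = (u16)base;
*	...
*	ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);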
2536 */ 2537 int 2538 ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) 2539 { 2540 int ret; 2541 2542 if (!res || !pf) 2543 return -EINVAL; 2544 2545 if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { 2546 dev_err(&pf->pdev->dev, 2547 "param err: needed=%d, num_entries = %d id=0x%04x\n", 2548 needed, res->num_entries, id); 2549 return -EINVAL; 2550 } 2551 2552 /* search based on search_hint */ 2553 ret = ice_search_res(res, needed, id); 2554 2555 if (ret < 0) { 2556 /* previous search failed. Reset search hint and try again */ 2557 res->search_hint = 0; 2558 ret = ice_search_res(res, needed, id); 2559 } 2560 2561 return ret; 2562 } 2563 2564 /** 2565 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI 2566 * @vsi: the VSI being un-configured 2567 */ 2568 void ice_vsi_dis_irq(struct ice_vsi *vsi) 2569 { 2570 int base = vsi->sw_base_vector; 2571 struct ice_pf *pf = vsi->back; 2572 struct ice_hw *hw = &pf->hw; 2573 u32 val; 2574 int i; 2575 2576 /* disable interrupt causation from each queue */ 2577 if (vsi->tx_rings) { 2578 ice_for_each_txq(vsi, i) { 2579 if (vsi->tx_rings[i]) { 2580 u16 reg; 2581 2582 reg = vsi->tx_rings[i]->reg_idx; 2583 val = rd32(hw, QINT_TQCTL(reg)); 2584 val &= ~QINT_TQCTL_CAUSE_ENA_M; 2585 wr32(hw, QINT_TQCTL(reg), val); 2586 } 2587 } 2588 } 2589 2590 if (vsi->rx_rings) { 2591 ice_for_each_rxq(vsi, i) { 2592 if (vsi->rx_rings[i]) { 2593 u16 reg; 2594 2595 reg = vsi->rx_rings[i]->reg_idx; 2596 val = rd32(hw, QINT_RQCTL(reg)); 2597 val &= ~QINT_RQCTL_CAUSE_ENA_M; 2598 wr32(hw, QINT_RQCTL(reg), val); 2599 } 2600 } 2601 } 2602 2603 /* disable each interrupt */ 2604 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { 2605 for (i = vsi->hw_base_vector; 2606 i < (vsi->num_q_vectors + vsi->hw_base_vector); i++) 2607 wr32(hw, GLINT_DYN_CTL(i), 0); 2608 2609 ice_flush(hw); 2610 for (i = 0; i < vsi->num_q_vectors; i++) 2611 synchronize_irq(pf->msix_entries[i + base].vector); 2612 } 2613 } 2614 2615 /** 2616 * ice_vsi_release - Delete a VSI and free its resources 2617 * @vsi: the VSI being removed 2618 * 2619 * Returns 0 on success or < 0 on error 2620 */ 2621 int ice_vsi_release(struct ice_vsi *vsi) 2622 { 2623 struct ice_vf *vf = NULL; 2624 struct ice_pf *pf; 2625 2626 if (!vsi->back) 2627 return -ENODEV; 2628 pf = vsi->back; 2629 2630 if (vsi->type == ICE_VSI_VF) 2631 vf = &pf->vf[vsi->vf_id]; 2632 /* do not unregister and free netdevs while driver is in the reset 2633 * recovery pending state. Since reset/rebuild happens through PF 2634 * service task workqueue, it's not a good idea to unregister netdev 2635 * that is associated with the PF that is running the work queue items 2636 * currently.
This is done to avoid a check_flush_dependency() warning 2637 * on this wq 2638 */ 2639 if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) { 2640 ice_napi_del(vsi); 2641 unregister_netdev(vsi->netdev); 2642 free_netdev(vsi->netdev); 2643 vsi->netdev = NULL; 2644 } 2645 2646 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2647 ice_rss_clean(vsi); 2648 2649 /* Disable VSI and free resources */ 2650 ice_vsi_dis_irq(vsi); 2651 ice_vsi_close(vsi); 2652 2653 /* reclaim interrupt vectors back to PF */ 2654 if (vsi->type != ICE_VSI_VF) { 2655 /* reclaim SW interrupts back to the common pool */ 2656 ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, 2657 vsi->idx); 2658 pf->num_avail_sw_msix += vsi->num_q_vectors; 2659 /* reclaim HW interrupts back to the common pool */ 2660 ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, 2661 vsi->idx); 2662 pf->num_avail_hw_msix += vsi->num_q_vectors; 2663 } else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) { 2664 /* Reclaim VF resources back only while freeing all VFs or 2665 * vector reassignment is requested 2666 */ 2667 ice_free_res(vsi->back->hw_irq_tracker, vf->first_vector_idx, 2668 vsi->idx); 2669 pf->num_avail_hw_msix += pf->num_vf_msix; 2670 } 2671 2672 ice_remove_vsi_fltr(&pf->hw, vsi->idx); 2673 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); 2674 ice_vsi_delete(vsi); 2675 ice_vsi_free_q_vectors(vsi); 2676 ice_vsi_clear_rings(vsi); 2677 2678 ice_vsi_put_qs(vsi); 2679 pf->q_left_tx += vsi->alloc_txq; 2680 pf->q_left_rx += vsi->alloc_rxq; 2681 2682 /* retain SW VSI data structure since it is needed to unregister and 2683 * free VSI netdev when PF is not in reset recovery pending state, 2684 * e.g. during rmmod. 2685 */ 2686 if (!ice_is_reset_in_progress(pf->state)) 2687 ice_vsi_clear(vsi); 2688 2689 return 0; 2690 } 2691 2692 /** 2693 * ice_vsi_rebuild - Rebuild VSI after reset 2694 * @vsi: VSI to be rebuilt 2695 * 2696 * Returns 0 on success and a negative value on failure 2697 */ 2698 int ice_vsi_rebuild(struct ice_vsi *vsi) 2699 { 2700 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2701 struct ice_vf *vf = NULL; 2702 struct ice_pf *pf; 2703 int ret, i; 2704 2705 if (!vsi) 2706 return -EINVAL; 2707 2708 pf = vsi->back; 2709 if (vsi->type == ICE_VSI_VF) 2710 vf = &pf->vf[vsi->vf_id]; 2711 2712 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); 2713 ice_vsi_free_q_vectors(vsi); 2714 2715 if (vsi->type != ICE_VSI_VF) { 2716 /* reclaim SW interrupts back to the common pool */ 2717 ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); 2718 pf->num_avail_sw_msix += vsi->num_q_vectors; 2719 vsi->sw_base_vector = 0; 2720 /* reclaim HW interrupts back to the common pool */ 2721 ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, 2722 vsi->idx); 2723 pf->num_avail_hw_msix += vsi->num_q_vectors; 2724 } else { 2725 /* Reclaim VF resources back to the common pool for reset and 2726 * rebuild, with vector reassignment 2727 */ 2728 ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx, 2729 vsi->idx); 2730 pf->num_avail_hw_msix += pf->num_vf_msix; 2731 } 2732 vsi->hw_base_vector = 0; 2733 2734 ice_vsi_clear_rings(vsi); 2735 ice_vsi_free_arrays(vsi, false); 2736 ice_dev_onetime_setup(&vsi->back->hw); 2737 if (vsi->type == ICE_VSI_VF) 2738 ice_vsi_set_num_qs(vsi, vf->vf_id); 2739 else 2740 ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); 2741 ice_vsi_set_tc_cfg(vsi); 2742 2743 /* Initialize VSI struct elements and create VSI in FW */ 2744 ret = ice_vsi_init(vsi); 2745 if (ret < 0) 2746 goto err_vsi; 2747 2748 ret =
ice_vsi_alloc_arrays(vsi, false); 2749 if (ret < 0) 2750 goto err_vsi; 2751 2752 switch (vsi->type) { 2753 case ICE_VSI_PF: 2754 ret = ice_vsi_alloc_q_vectors(vsi); 2755 if (ret) 2756 goto err_rings; 2757 2758 ret = ice_vsi_setup_vector_base(vsi); 2759 if (ret) 2760 goto err_vectors; 2761 2762 ret = ice_vsi_alloc_rings(vsi); 2763 if (ret) 2764 goto err_vectors; 2765 2766 ice_vsi_map_rings_to_vectors(vsi); 2767 /* Do not exit if configuring RSS had an issue, at least 2768 * receive traffic on first queue. Hence no need to capture 2769 * return value 2770 */ 2771 if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) 2772 ice_vsi_cfg_rss_lut_key(vsi); 2773 break; 2774 case ICE_VSI_VF: 2775 ret = ice_vsi_alloc_q_vectors(vsi); 2776 if (ret) 2777 goto err_rings; 2778 2779 ret = ice_vsi_setup_vector_base(vsi); 2780 if (ret) 2781 goto err_vectors; 2782 2783 ret = ice_vsi_alloc_rings(vsi); 2784 if (ret) 2785 goto err_vectors; 2786 2787 vsi->back->q_left_tx -= vsi->alloc_txq; 2788 vsi->back->q_left_rx -= vsi->alloc_rxq; 2789 break; 2790 default: 2791 break; 2792 } 2793 2794 /* configure VSI nodes based on number of queues and TC's */ 2795 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2796 max_txqs[i] = pf->num_lan_tx; 2797 2798 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2799 max_txqs); 2800 if (ret) { 2801 dev_info(&vsi->back->pdev->dev, 2802 "Failed VSI lan queue config\n"); 2803 goto err_vectors; 2804 } 2805 return 0; 2806 2807 err_vectors: 2808 ice_vsi_free_q_vectors(vsi); 2809 err_rings: 2810 if (vsi->netdev) { 2811 vsi->current_netdev_flags = 0; 2812 unregister_netdev(vsi->netdev); 2813 free_netdev(vsi->netdev); 2814 vsi->netdev = NULL; 2815 } 2816 err_vsi: 2817 ice_vsi_clear(vsi); 2818 set_bit(__ICE_RESET_FAILED, vsi->back->state); 2819 return ret; 2820 } 2821 2822 /** 2823 * ice_is_reset_in_progress - check for a reset in progress 2824 * @state: pf state field 2825 */ 2826 bool ice_is_reset_in_progress(unsigned long *state) 2827 { 2828 return test_bit(__ICE_RESET_OICR_RECV, state) || 2829 test_bit(__ICE_PFR_REQ, state) || 2830 test_bit(__ICE_CORER_REQ, state) || 2831 test_bit(__ICE_GLOBR_REQ, state); 2832 } 2833 2834 #ifdef CONFIG_DCB 2835 /** 2836 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map 2837 * @vsi: VSI being configured 2838 * @ctx: the context buffer returned from AQ VSI update command 2839 */ 2840 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) 2841 { 2842 vsi->info.mapping_flags = ctx->info.mapping_flags; 2843 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, 2844 sizeof(vsi->info.q_mapping)); 2845 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, 2846 sizeof(vsi->info.tc_mapping)); 2847 } 2848 2849 /** 2850 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration 2851 * @vsi: the VSI being configured 2852 * @ena_tc: TC map to be enabled 2853 */ 2854 static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) 2855 { 2856 struct net_device *netdev = vsi->netdev; 2857 struct ice_pf *pf = vsi->back; 2858 struct ice_dcbx_cfg *dcbcfg; 2859 u8 netdev_tc; 2860 int i; 2861 2862 if (!netdev) 2863 return; 2864 2865 if (!ena_tc) { 2866 netdev_reset_tc(netdev); 2867 return; 2868 } 2869 2870 if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc)) 2871 return; 2872 2873 dcbcfg = &pf->hw.port_info->local_dcbx_cfg; 2874 2875 ice_for_each_traffic_class(i) 2876 if (vsi->tc_cfg.ena_tc & BIT(i)) 2877 netdev_set_tc_queue(netdev, 2878 vsi->tc_cfg.tc_info[i].netdev_tc, 2879 vsi->tc_cfg.tc_info[i].qcount_tx, 2880 
vsi->tc_cfg.tc_info[i].qoffset); 2881 2882 for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { 2883 u8 ets_tc = dcbcfg->etscfg.prio_table[i]; 2884 2885 /* Get the mapped netdev TC# for the UP */ 2886 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; 2887 netdev_set_prio_tc_map(netdev, i, netdev_tc); 2888 } 2889 } 2890 2891 /** 2892 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map 2893 * @vsi: VSI to be configured 2894 * @ena_tc: TC bitmap 2895 * 2896 * VSI queues are expected to be quiesced before calling this function 2897 */ 2898 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) 2899 { 2900 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2901 struct ice_vsi_ctx *ctx; 2902 struct ice_pf *pf = vsi->back; 2903 enum ice_status status; 2904 int i, ret = 0; 2905 u8 num_tc = 0; 2906 2907 ice_for_each_traffic_class(i) { 2908 /* build bitmap of enabled TCs */ 2909 if (ena_tc & BIT(i)) 2910 num_tc++; 2911 /* populate max_txqs per TC */ 2912 max_txqs[i] = pf->num_lan_tx; 2913 } 2914 2915 vsi->tc_cfg.ena_tc = ena_tc; 2916 vsi->tc_cfg.numtc = num_tc; 2917 2918 ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL); 2919 if (!ctx) 2920 return -ENOMEM; 2921 2922 ctx->vf_num = 0; 2923 ctx->info = vsi->info; 2924 2925 ice_vsi_setup_q_map(vsi, ctx); 2926 2927 /* must indicate which sections of the VSI context are being modified */ 2928 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); 2929 status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); 2930 if (status) { 2931 dev_info(&pf->pdev->dev, "Failed VSI Update\n"); 2932 ret = -EIO; 2933 goto out; 2934 } 2935 2936 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2937 max_txqs); 2938 2939 if (status) { 2940 dev_err(&pf->pdev->dev, 2941 "VSI %d failed TC config, error %d\n", 2942 vsi->vsi_num, status); 2943 ret = -EIO; 2944 goto out; 2945 } 2946 ice_vsi_update_q_map(vsi, ctx); 2947 vsi->info.valid_sections = 0; 2948 2949 ice_vsi_cfg_netdev_tc(vsi, ena_tc); 2950 out: 2951 devm_kfree(&pf->pdev->dev, ctx); 2952 return ret; 2953 } 2954 #endif /* CONFIG_DCB */ 2955