1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Intel Corporation. */ 3 4 #include "ice.h" 5 #include "ice_lib.h" 6 #include "ice_dcb_lib.h" 7 8 /** 9 * ice_setup_rx_ctx - Configure a receive ring context 10 * @ring: The Rx ring to configure 11 * 12 * Configure the Rx descriptor ring in RLAN context. 13 */ 14 static int ice_setup_rx_ctx(struct ice_ring *ring) 15 { 16 struct ice_vsi *vsi = ring->vsi; 17 struct ice_hw *hw = &vsi->back->hw; 18 u32 rxdid = ICE_RXDID_FLEX_NIC; 19 struct ice_rlan_ctx rlan_ctx; 20 u32 regval; 21 u16 pf_q; 22 int err; 23 24 /* what is Rx queue number in global space of 2K Rx queues */ 25 pf_q = vsi->rxq_map[ring->q_index]; 26 27 /* clear the context structure first */ 28 memset(&rlan_ctx, 0, sizeof(rlan_ctx)); 29 30 rlan_ctx.base = ring->dma >> 7; 31 32 rlan_ctx.qlen = ring->count; 33 34 /* Receive Packet Data Buffer Size. 35 * The Packet Data Buffer Size is defined in 128 byte units. 36 */ 37 rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; 38 39 /* use 32 byte descriptors */ 40 rlan_ctx.dsize = 1; 41 42 /* Strip the Ethernet CRC bytes before the packet is posted to host 43 * memory. 44 */ 45 rlan_ctx.crcstrip = 1; 46 47 /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */ 48 rlan_ctx.l2tsel = 1; 49 50 rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; 51 rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; 52 rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; 53 54 /* This controls whether VLAN is stripped from inner headers 55 * The VLAN in the inner L2 header is stripped to the receive 56 * descriptor if enabled by this flag. 57 */ 58 rlan_ctx.showiv = 0; 59 60 /* Max packet size for this queue - must not be set to a larger value 61 * than 5 x DBUF 62 */ 63 rlan_ctx.rxmax = min_t(u16, vsi->max_frame, 64 ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len); 65 66 /* Rx queue threshold in units of 64 */ 67 rlan_ctx.lrxqthresh = 1; 68 69 /* Enable Flexible Descriptors in the queue context which 70 * allows this driver to select a specific receive descriptor format 71 */ 72 if (vsi->type != ICE_VSI_VF) { 73 regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); 74 regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & 75 QRXFLXP_CNTXT_RXDID_IDX_M; 76 77 /* increasing context priority to pick up profile ID; 78 * default is 0x01; setting to 0x03 to ensure profile 79 * is programming if prev context is of same priority 80 */ 81 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & 82 QRXFLXP_CNTXT_RXDID_PRIO_M; 83 84 wr32(hw, QRXFLXP_CNTXT(pf_q), regval); 85 } 86 87 /* Absolute queue number out of 2K needs to be passed */ 88 err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); 89 if (err) { 90 dev_err(&vsi->back->pdev->dev, 91 "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", 92 pf_q, err); 93 return -EIO; 94 } 95 96 if (vsi->type == ICE_VSI_VF) 97 return 0; 98 99 /* init queue specific tail register */ 100 ring->tail = hw->hw_addr + QRX_TAIL(pf_q); 101 writel(0, ring->tail); 102 ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring)); 103 104 return 0; 105 } 106 107 /** 108 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance 109 * @ring: The Tx ring to configure 110 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized 111 * @pf_q: queue index in the PF space 112 * 113 * Configure the Tx descriptor ring in TLAN context. 114 */ 115 static void 116 ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) 117 { 118 struct ice_vsi *vsi = ring->vsi; 119 struct ice_hw *hw = &vsi->back->hw; 120 121 tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; 122 123 tlan_ctx->port_num = vsi->port_info->lport; 124 125 /* Transmit Queue Length */ 126 tlan_ctx->qlen = ring->count; 127 128 ice_set_cgd_num(tlan_ctx, ring); 129 130 /* PF number */ 131 tlan_ctx->pf_num = hw->pf_id; 132 133 /* queue belongs to a specific VSI type 134 * VF / VM index should be programmed per vmvf_type setting: 135 * for vmvf_type = VF, it is VF number between 0-256 136 * for vmvf_type = VM, it is VM number between 0-767 137 * for PF or EMP this field should be set to zero 138 */ 139 switch (vsi->type) { 140 case ICE_VSI_LB: 141 /* fall through */ 142 case ICE_VSI_PF: 143 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; 144 break; 145 case ICE_VSI_VF: 146 /* Firmware expects vmvf_num to be absolute VF ID */ 147 tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id; 148 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; 149 break; 150 default: 151 return; 152 } 153 154 /* make sure the context is associated with the right VSI */ 155 tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx); 156 157 tlan_ctx->tso_ena = ICE_TX_LEGACY; 158 tlan_ctx->tso_qnum = pf_q; 159 160 /* Legacy or Advanced Host Interface: 161 * 0: Advanced Host Interface 162 * 1: Legacy Host Interface 163 */ 164 tlan_ctx->legacy_int = ICE_TX_LEGACY; 165 } 166 167 /** 168 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled 169 * @pf: the PF being configured 170 * @pf_q: the PF queue 171 * @ena: enable or disable state of the queue 172 * 173 * This routine will wait for the given Rx queue of the PF to reach the 174 * enabled or disabled state. 175 * Returns -ETIMEDOUT in case of failing to reach the requested state after 176 * multiple retries; else will return 0 in case of success. 177 */ 178 static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) 179 { 180 int i; 181 182 for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) { 183 if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) & 184 QRX_CTRL_QENA_STAT_M)) 185 return 0; 186 187 usleep_range(20, 40); 188 } 189 190 return -ETIMEDOUT; 191 } 192 193 /** 194 * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings 195 * @vsi: the VSI being configured 196 * @ena: start or stop the Rx rings 197 */ 198 static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) 199 { 200 struct ice_pf *pf = vsi->back; 201 struct ice_hw *hw = &pf->hw; 202 int i, ret = 0; 203 204 for (i = 0; i < vsi->num_rxq; i++) { 205 int pf_q = vsi->rxq_map[i]; 206 u32 rx_reg; 207 208 rx_reg = rd32(hw, QRX_CTRL(pf_q)); 209 210 /* Skip if the queue is already in the requested state */ 211 if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) 212 continue; 213 214 /* turn on/off the queue */ 215 if (ena) 216 rx_reg |= QRX_CTRL_QENA_REQ_M; 217 else 218 rx_reg &= ~QRX_CTRL_QENA_REQ_M; 219 wr32(hw, QRX_CTRL(pf_q), rx_reg); 220 221 /* wait for the change to finish */ 222 ret = ice_pf_rxq_wait(pf, pf_q, ena); 223 if (ret) { 224 dev_err(&pf->pdev->dev, 225 "VSI idx %d Rx ring %d %sable timeout\n", 226 vsi->idx, pf_q, (ena ? "en" : "dis")); 227 break; 228 } 229 } 230 231 return ret; 232 } 233 234 /** 235 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI 236 * @vsi: VSI pointer 237 * 238 * On error: returns error code (negative) 239 * On success: returns 0 240 */ 241 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) 242 { 243 struct ice_pf *pf = vsi->back; 244 245 /* allocate memory for both Tx and Rx ring pointers */ 246 vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, 247 sizeof(*vsi->tx_rings), GFP_KERNEL); 248 if (!vsi->tx_rings) 249 goto err_txrings; 250 251 vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, 252 sizeof(*vsi->rx_rings), GFP_KERNEL); 253 if (!vsi->rx_rings) 254 goto err_rxrings; 255 256 /* There is no need to allocate q_vectors for a loopback VSI. */ 257 if (vsi->type == ICE_VSI_LB) 258 return 0; 259 260 /* allocate memory for q_vector pointers */ 261 vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors, 262 sizeof(*vsi->q_vectors), GFP_KERNEL); 263 if (!vsi->q_vectors) 264 goto err_vectors; 265 266 return 0; 267 268 err_vectors: 269 devm_kfree(&pf->pdev->dev, vsi->rx_rings); 270 err_rxrings: 271 devm_kfree(&pf->pdev->dev, vsi->tx_rings); 272 err_txrings: 273 return -ENOMEM; 274 } 275 276 /** 277 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI 278 * @vsi: the VSI being configured 279 */ 280 static void ice_vsi_set_num_desc(struct ice_vsi *vsi) 281 { 282 switch (vsi->type) { 283 case ICE_VSI_PF: 284 /* fall through */ 285 case ICE_VSI_LB: 286 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; 287 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; 288 break; 289 default: 290 dev_dbg(&vsi->back->pdev->dev, 291 "Not setting number of Tx/Rx descriptors for VSI type %d\n", 292 vsi->type); 293 break; 294 } 295 } 296 297 /** 298 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI 299 * @vsi: the VSI being configured 300 * @vf_id: ID of the VF being configured 301 * 302 * Return 0 on success and a negative value on error 303 */ 304 static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) 305 { 306 struct ice_pf *pf = vsi->back; 307 struct ice_vf *vf = NULL; 308 309 if (vsi->type == ICE_VSI_VF) 310 vsi->vf_id = vf_id; 311 312 switch (vsi->type) { 313 case ICE_VSI_PF: 314 vsi->alloc_txq = pf->num_lan_tx; 315 vsi->alloc_rxq = pf->num_lan_rx; 316 vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); 317 break; 318 case ICE_VSI_VF: 319 vf = &pf->vf[vsi->vf_id]; 320 vsi->alloc_txq = vf->num_vf_qs; 321 vsi->alloc_rxq = vf->num_vf_qs; 322 /* pf->num_vf_msix includes (VF miscellaneous vector + 323 * data queue interrupts). Since vsi->num_q_vectors is number 324 * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the 325 * original vector count 326 */ 327 vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF; 328 break; 329 case ICE_VSI_LB: 330 vsi->alloc_txq = 1; 331 vsi->alloc_rxq = 1; 332 break; 333 default: 334 dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); 335 break; 336 } 337 338 ice_vsi_set_num_desc(vsi); 339 } 340 341 /** 342 * ice_get_free_slot - get the next non-NULL location index in array 343 * @array: array to search 344 * @size: size of the array 345 * @curr: last known occupied index to be used as a search hint 346 * 347 * void * is being used to keep the functionality generic. This lets us use this 348 * function on any array of pointers. 349 */ 350 static int ice_get_free_slot(void *array, int size, int curr) 351 { 352 int **tmp_array = (int **)array; 353 int next; 354 355 if (curr < (size - 1) && !tmp_array[curr + 1]) { 356 next = curr + 1; 357 } else { 358 int i = 0; 359 360 while ((i < size) && (tmp_array[i])) 361 i++; 362 if (i == size) 363 next = ICE_NO_VSI; 364 else 365 next = i; 366 } 367 return next; 368 } 369 370 /** 371 * ice_vsi_delete - delete a VSI from the switch 372 * @vsi: pointer to VSI being removed 373 */ 374 void ice_vsi_delete(struct ice_vsi *vsi) 375 { 376 struct ice_pf *pf = vsi->back; 377 struct ice_vsi_ctx *ctxt; 378 enum ice_status status; 379 380 ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL); 381 if (!ctxt) 382 return; 383 384 if (vsi->type == ICE_VSI_VF) 385 ctxt->vf_num = vsi->vf_id; 386 ctxt->vsi_num = vsi->vsi_num; 387 388 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info)); 389 390 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); 391 if (status) 392 dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", 393 vsi->vsi_num); 394 395 devm_kfree(&pf->pdev->dev, ctxt); 396 } 397 398 /** 399 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI 400 * @vsi: pointer to VSI being cleared 401 */ 402 static void ice_vsi_free_arrays(struct ice_vsi *vsi) 403 { 404 struct ice_pf *pf = vsi->back; 405 406 /* free the ring and vector containers */ 407 if (vsi->q_vectors) { 408 devm_kfree(&pf->pdev->dev, vsi->q_vectors); 409 vsi->q_vectors = NULL; 410 } 411 if (vsi->tx_rings) { 412 devm_kfree(&pf->pdev->dev, vsi->tx_rings); 413 vsi->tx_rings = NULL; 414 } 415 if (vsi->rx_rings) { 416 devm_kfree(&pf->pdev->dev, vsi->rx_rings); 417 vsi->rx_rings = NULL; 418 } 419 } 420 421 /** 422 * ice_vsi_clear - clean up and deallocate the provided VSI 423 * @vsi: pointer to VSI being cleared 424 * 425 * This deallocates the VSI's queue resources, removes it from the PF's 426 * VSI array if necessary, and deallocates the VSI 427 * 428 * Returns 0 on success, negative on failure 429 */ 430 int ice_vsi_clear(struct ice_vsi *vsi) 431 { 432 struct ice_pf *pf = NULL; 433 434 if (!vsi) 435 return 0; 436 437 if (!vsi->back) 438 return -EINVAL; 439 440 pf = vsi->back; 441 442 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { 443 dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n", 444 vsi->idx); 445 return -EINVAL; 446 } 447 448 mutex_lock(&pf->sw_mutex); 449 /* updates the PF for this cleared VSI */ 450 451 pf->vsi[vsi->idx] = NULL; 452 if (vsi->idx < pf->next_vsi) 453 pf->next_vsi = vsi->idx; 454 455 ice_vsi_free_arrays(vsi); 456 mutex_unlock(&pf->sw_mutex); 457 devm_kfree(&pf->pdev->dev, vsi); 458 459 return 0; 460 } 461 462 /** 463 * ice_msix_clean_rings - MSIX mode Interrupt Handler 464 * @irq: interrupt number 465 * @data: pointer to a q_vector 466 */ 467 static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) 468 { 469 struct ice_q_vector *q_vector = (struct ice_q_vector *)data; 470 471 if (!q_vector->tx.ring && !q_vector->rx.ring) 472 return IRQ_HANDLED; 473 474 napi_schedule(&q_vector->napi); 475 476 return IRQ_HANDLED; 477 } 478 479 /** 480 * ice_vsi_alloc - Allocates the next available struct VSI in the PF 481 * @pf: board private structure 482 * @type: type of VSI 483 * @vf_id: ID of the VF being configured 484 * 485 * returns a pointer to a VSI on success, NULL on failure. 486 */ 487 static struct ice_vsi * 488 ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id) 489 { 490 struct ice_vsi *vsi = NULL; 491 492 /* Need to protect the allocation of the VSIs at the PF level */ 493 mutex_lock(&pf->sw_mutex); 494 495 /* If we have already allocated our maximum number of VSIs, 496 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index 497 * is available to be populated 498 */ 499 if (pf->next_vsi == ICE_NO_VSI) { 500 dev_dbg(&pf->pdev->dev, "out of VSI slots!\n"); 501 goto unlock_pf; 502 } 503 504 vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL); 505 if (!vsi) 506 goto unlock_pf; 507 508 vsi->type = type; 509 vsi->back = pf; 510 set_bit(__ICE_DOWN, vsi->state); 511 vsi->idx = pf->next_vsi; 512 vsi->work_lmt = ICE_DFLT_IRQ_WORK; 513 514 if (type == ICE_VSI_VF) 515 ice_vsi_set_num_qs(vsi, vf_id); 516 else 517 ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); 518 519 switch (vsi->type) { 520 case ICE_VSI_PF: 521 if (ice_vsi_alloc_arrays(vsi)) 522 goto err_rings; 523 524 /* Setup default MSIX irq handler for VSI */ 525 vsi->irq_handler = ice_msix_clean_rings; 526 break; 527 case ICE_VSI_VF: 528 if (ice_vsi_alloc_arrays(vsi)) 529 goto err_rings; 530 break; 531 case ICE_VSI_LB: 532 if (ice_vsi_alloc_arrays(vsi)) 533 goto err_rings; 534 break; 535 default: 536 dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); 537 goto unlock_pf; 538 } 539 540 /* fill VSI slot in the PF struct */ 541 pf->vsi[pf->next_vsi] = vsi; 542 543 /* prepare pf->next_vsi for next use */ 544 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, 545 pf->next_vsi); 546 goto unlock_pf; 547 548 err_rings: 549 devm_kfree(&pf->pdev->dev, vsi); 550 vsi = NULL; 551 unlock_pf: 552 mutex_unlock(&pf->sw_mutex); 553 return vsi; 554 } 555 556 /** 557 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI 558 * @qs_cfg: gathered variables needed for PF->VSI queues assignment 559 * 560 * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap 561 */ 562 static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg) 563 { 564 int offset, i; 565 566 mutex_lock(qs_cfg->qs_mutex); 567 offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size, 568 0, qs_cfg->q_count, 0); 569 if (offset >= qs_cfg->pf_map_size) { 570 mutex_unlock(qs_cfg->qs_mutex); 571 return -ENOMEM; 572 } 573 574 bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count); 575 for (i = 0; i < qs_cfg->q_count; i++) 576 qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset; 577 mutex_unlock(qs_cfg->qs_mutex); 578 579 return 0; 580 } 581 582 /** 583 * __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI 584 * @qs_cfg: gathered variables needed for pf->vsi queues assignment 585 * 586 * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap 587 */ 588 static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg) 589 { 590 int i, index = 0; 591 592 mutex_lock(qs_cfg->qs_mutex); 593 for (i = 0; i < qs_cfg->q_count; i++) { 594 index = find_next_zero_bit(qs_cfg->pf_map, 595 qs_cfg->pf_map_size, index); 596 if (index >= qs_cfg->pf_map_size) 597 goto err_scatter; 598 set_bit(index, qs_cfg->pf_map); 599 qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index; 600 } 601 mutex_unlock(qs_cfg->qs_mutex); 602 603 return 0; 604 err_scatter: 605 for (index = 0; index < i; index++) { 606 clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map); 607 qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0; 608 } 609 mutex_unlock(qs_cfg->qs_mutex); 610 611 return -ENOMEM; 612 } 613 614 /** 615 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI 616 * @qs_cfg: gathered variables needed for pf->vsi queues assignment 617 * 618 * This function first tries to find contiguous space. If it is not successful, 619 * it tries with the scatter approach. 620 * 621 * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap 622 */ 623 static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg) 624 { 625 int ret = 0; 626 627 ret = __ice_vsi_get_qs_contig(qs_cfg); 628 if (ret) { 629 /* contig failed, so try with scatter approach */ 630 qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER; 631 qs_cfg->q_count = min_t(u16, qs_cfg->q_count, 632 qs_cfg->scatter_count); 633 ret = __ice_vsi_get_qs_sc(qs_cfg); 634 } 635 return ret; 636 } 637 638 /** 639 * ice_vsi_get_qs - Assign queues from PF to VSI 640 * @vsi: the VSI to assign queues to 641 * 642 * Returns 0 on success and a negative value on error 643 */ 644 static int ice_vsi_get_qs(struct ice_vsi *vsi) 645 { 646 struct ice_pf *pf = vsi->back; 647 struct ice_qs_cfg tx_qs_cfg = { 648 .qs_mutex = &pf->avail_q_mutex, 649 .pf_map = pf->avail_txqs, 650 .pf_map_size = ICE_MAX_TXQS, 651 .q_count = vsi->alloc_txq, 652 .scatter_count = ICE_MAX_SCATTER_TXQS, 653 .vsi_map = vsi->txq_map, 654 .vsi_map_offset = 0, 655 .mapping_mode = vsi->tx_mapping_mode 656 }; 657 struct ice_qs_cfg rx_qs_cfg = { 658 .qs_mutex = &pf->avail_q_mutex, 659 .pf_map = pf->avail_rxqs, 660 .pf_map_size = ICE_MAX_RXQS, 661 .q_count = vsi->alloc_rxq, 662 .scatter_count = ICE_MAX_SCATTER_RXQS, 663 .vsi_map = vsi->rxq_map, 664 .vsi_map_offset = 0, 665 .mapping_mode = vsi->rx_mapping_mode 666 }; 667 int ret = 0; 668 669 vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG; 670 vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG; 671 672 ret = __ice_vsi_get_qs(&tx_qs_cfg); 673 if (!ret) 674 ret = __ice_vsi_get_qs(&rx_qs_cfg); 675 676 return ret; 677 } 678 679 /** 680 * ice_vsi_put_qs - Release queues from VSI to PF 681 * @vsi: the VSI that is going to release queues 682 */ 683 void ice_vsi_put_qs(struct ice_vsi *vsi) 684 { 685 struct ice_pf *pf = vsi->back; 686 int i; 687 688 mutex_lock(&pf->avail_q_mutex); 689 690 for (i = 0; i < vsi->alloc_txq; i++) { 691 clear_bit(vsi->txq_map[i], pf->avail_txqs); 692 vsi->txq_map[i] = ICE_INVAL_Q_INDEX; 693 } 694 695 for (i = 0; i < vsi->alloc_rxq; i++) { 696 clear_bit(vsi->rxq_map[i], pf->avail_rxqs); 697 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; 698 } 699 700 mutex_unlock(&pf->avail_q_mutex); 701 } 702 703 /** 704 * ice_rss_clean - Delete RSS related VSI structures that hold user inputs 705 * @vsi: the VSI being removed 706 */ 707 static void ice_rss_clean(struct ice_vsi *vsi) 708 { 709 struct ice_pf *pf; 710 711 pf = vsi->back; 712 713 if (vsi->rss_hkey_user) 714 devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user); 715 if (vsi->rss_lut_user) 716 devm_kfree(&pf->pdev->dev, vsi->rss_lut_user); 717 } 718 719 /** 720 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type 721 * @vsi: the VSI being configured 722 */ 723 static void ice_vsi_set_rss_params(struct ice_vsi *vsi) 724 { 725 struct ice_hw_common_caps *cap; 726 struct ice_pf *pf = vsi->back; 727 728 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 729 vsi->rss_size = 1; 730 return; 731 } 732 733 cap = &pf->hw.func_caps.common_cap; 734 switch (vsi->type) { 735 case ICE_VSI_PF: 736 /* PF VSI will inherit RSS instance of PF */ 737 vsi->rss_table_size = cap->rss_table_size; 738 vsi->rss_size = min_t(int, num_online_cpus(), 739 BIT(cap->rss_table_entry_width)); 740 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; 741 break; 742 case ICE_VSI_VF: 743 /* VF VSI will gets a small RSS table 744 * For VSI_LUT, LUT size should be set to 64 bytes 745 */ 746 vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; 747 vsi->rss_size = min_t(int, num_online_cpus(), 748 BIT(cap->rss_table_entry_width)); 749 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; 750 break; 751 case ICE_VSI_LB: 752 break; 753 default: 754 dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", 755 vsi->type); 756 break; 757 } 758 } 759 760 /** 761 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI 762 * @ctxt: the VSI context being set 763 * 764 * This initializes a default VSI context for all sections except the Queues. 765 */ 766 static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) 767 { 768 u32 table = 0; 769 770 memset(&ctxt->info, 0, sizeof(ctxt->info)); 771 /* VSI's should be allocated from shared pool */ 772 ctxt->alloc_from_pool = true; 773 /* Src pruning enabled by default */ 774 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; 775 /* Traffic from VSI can be sent to LAN */ 776 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; 777 /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy 778 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all 779 * packets untagged/tagged. 780 */ 781 ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & 782 ICE_AQ_VSI_VLAN_MODE_M) >> 783 ICE_AQ_VSI_VLAN_MODE_S); 784 /* Have 1:1 UP mapping for both ingress/egress tables */ 785 table |= ICE_UP_TABLE_TRANSLATE(0, 0); 786 table |= ICE_UP_TABLE_TRANSLATE(1, 1); 787 table |= ICE_UP_TABLE_TRANSLATE(2, 2); 788 table |= ICE_UP_TABLE_TRANSLATE(3, 3); 789 table |= ICE_UP_TABLE_TRANSLATE(4, 4); 790 table |= ICE_UP_TABLE_TRANSLATE(5, 5); 791 table |= ICE_UP_TABLE_TRANSLATE(6, 6); 792 table |= ICE_UP_TABLE_TRANSLATE(7, 7); 793 ctxt->info.ingress_table = cpu_to_le32(table); 794 ctxt->info.egress_table = cpu_to_le32(table); 795 /* Have 1:1 UP mapping for outer to inner UP table */ 796 ctxt->info.outer_up_table = cpu_to_le32(table); 797 /* No Outer tag support outer_tag_flags remains to zero */ 798 } 799 800 /** 801 * ice_vsi_setup_q_map - Setup a VSI queue map 802 * @vsi: the VSI being configured 803 * @ctxt: VSI context structure 804 */ 805 static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) 806 { 807 u16 offset = 0, qmap = 0, tx_count = 0; 808 u16 qcount_tx = vsi->alloc_txq; 809 u16 qcount_rx = vsi->alloc_rxq; 810 u16 tx_numq_tc, rx_numq_tc; 811 u16 pow = 0, max_rss = 0; 812 bool ena_tc0 = false; 813 u8 netdev_tc = 0; 814 int i; 815 816 /* at least TC0 should be enabled by default */ 817 if (vsi->tc_cfg.numtc) { 818 if (!(vsi->tc_cfg.ena_tc & BIT(0))) 819 ena_tc0 = true; 820 } else { 821 ena_tc0 = true; 822 } 823 824 if (ena_tc0) { 825 vsi->tc_cfg.numtc++; 826 vsi->tc_cfg.ena_tc |= 1; 827 } 828 829 rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc; 830 if (!rx_numq_tc) 831 rx_numq_tc = 1; 832 tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc; 833 if (!tx_numq_tc) 834 tx_numq_tc = 1; 835 836 /* TC mapping is a function of the number of Rx queues assigned to the 837 * VSI for each traffic class and the offset of these queues. 838 * The first 10 bits are for queue offset for TC0, next 4 bits for no:of 839 * queues allocated to TC0. No:of queues is a power-of-2. 840 * 841 * If TC is not enabled, the queue offset is set to 0, and allocate one 842 * queue, this way, traffic for the given TC will be sent to the default 843 * queue. 844 * 845 * Setup number and offset of Rx queues for all TCs for the VSI 846 */ 847 848 qcount_rx = rx_numq_tc; 849 850 /* qcount will change if RSS is enabled */ 851 if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) { 852 if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) { 853 if (vsi->type == ICE_VSI_PF) 854 max_rss = ICE_MAX_LG_RSS_QS; 855 else 856 max_rss = ICE_MAX_SMALL_RSS_QS; 857 qcount_rx = min_t(int, rx_numq_tc, max_rss); 858 qcount_rx = min_t(int, qcount_rx, vsi->rss_size); 859 } 860 } 861 862 /* find the (rounded up) power-of-2 of qcount */ 863 pow = order_base_2(qcount_rx); 864 865 ice_for_each_traffic_class(i) { 866 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { 867 /* TC is not enabled */ 868 vsi->tc_cfg.tc_info[i].qoffset = 0; 869 vsi->tc_cfg.tc_info[i].qcount_rx = 1; 870 vsi->tc_cfg.tc_info[i].qcount_tx = 1; 871 vsi->tc_cfg.tc_info[i].netdev_tc = 0; 872 ctxt->info.tc_mapping[i] = 0; 873 continue; 874 } 875 876 /* TC is enabled */ 877 vsi->tc_cfg.tc_info[i].qoffset = offset; 878 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; 879 vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc; 880 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; 881 882 qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & 883 ICE_AQ_VSI_TC_Q_OFFSET_M) | 884 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & 885 ICE_AQ_VSI_TC_Q_NUM_M); 886 offset += qcount_rx; 887 tx_count += tx_numq_tc; 888 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); 889 } 890 891 /* if offset is non-zero, means it is calculated correctly based on 892 * enabled TCs for a given VSI otherwise qcount_rx will always 893 * be correct and non-zero because it is based off - VSI's 894 * allocated Rx queues which is at least 1 (hence qcount_tx will be 895 * at least 1) 896 */ 897 if (offset) 898 vsi->num_rxq = offset; 899 else 900 vsi->num_rxq = qcount_rx; 901 902 vsi->num_txq = tx_count; 903 904 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { 905 dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); 906 /* since there is a chance that num_rxq could have been changed 907 * in the above for loop, make num_txq equal to num_rxq. 908 */ 909 vsi->num_txq = vsi->num_rxq; 910 } 911 912 /* Rx queue mapping */ 913 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); 914 /* q_mapping buffer holds the info for the first queue allocated for 915 * this VSI in the PF space and also the number of queues associated 916 * with this VSI. 917 */ 918 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); 919 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); 920 } 921 922 /** 923 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI 924 * @ctxt: the VSI context being set 925 * @vsi: the VSI being configured 926 */ 927 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) 928 { 929 u8 lut_type, hash_type; 930 struct ice_pf *pf; 931 932 pf = vsi->back; 933 934 switch (vsi->type) { 935 case ICE_VSI_PF: 936 /* PF VSI will inherit RSS instance of PF */ 937 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; 938 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 939 break; 940 case ICE_VSI_VF: 941 /* VF VSI will gets a small RSS table which is a VSI LUT type */ 942 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; 943 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 944 break; 945 case ICE_VSI_LB: 946 dev_dbg(&pf->pdev->dev, "Unsupported VSI type %d\n", vsi->type); 947 return; 948 default: 949 dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); 950 return; 951 } 952 953 ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & 954 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | 955 ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & 956 ICE_AQ_VSI_Q_OPT_RSS_HASH_M); 957 } 958 959 /** 960 * ice_vsi_init - Create and initialize a VSI 961 * @vsi: the VSI being configured 962 * 963 * This initializes a VSI context depending on the VSI type to be added and 964 * passes it down to the add_vsi aq command to create a new VSI. 965 */ 966 static int ice_vsi_init(struct ice_vsi *vsi) 967 { 968 struct ice_pf *pf = vsi->back; 969 struct ice_hw *hw = &pf->hw; 970 struct ice_vsi_ctx *ctxt; 971 int ret = 0; 972 973 ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL); 974 if (!ctxt) 975 return -ENOMEM; 976 977 ctxt->info = vsi->info; 978 switch (vsi->type) { 979 case ICE_VSI_LB: 980 /* fall through */ 981 case ICE_VSI_PF: 982 ctxt->flags = ICE_AQ_VSI_TYPE_PF; 983 break; 984 case ICE_VSI_VF: 985 ctxt->flags = ICE_AQ_VSI_TYPE_VF; 986 /* VF number here is the absolute VF number (0-255) */ 987 ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id; 988 break; 989 default: 990 return -ENODEV; 991 } 992 993 ice_set_dflt_vsi_ctx(ctxt); 994 /* if the switch is in VEB mode, allow VSI loopback */ 995 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) 996 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 997 998 /* Set LUT type and HASH type if RSS is enabled */ 999 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 1000 ice_set_rss_vsi_ctx(ctxt, vsi); 1001 1002 ctxt->info.sw_id = vsi->port_info->sw_id; 1003 ice_vsi_setup_q_map(vsi, ctxt); 1004 1005 /* Enable MAC Antispoof with new VSI being initialized or updated */ 1006 if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) { 1007 ctxt->info.valid_sections |= 1008 cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); 1009 ctxt->info.sec_flags |= 1010 ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; 1011 } 1012 1013 ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); 1014 if (ret) { 1015 dev_err(&pf->pdev->dev, 1016 "Add VSI failed, err %d\n", ret); 1017 return -EIO; 1018 } 1019 1020 /* keep context for update VSI operations */ 1021 vsi->info = ctxt->info; 1022 1023 /* record VSI number returned */ 1024 vsi->vsi_num = ctxt->vsi_num; 1025 1026 devm_kfree(&pf->pdev->dev, ctxt); 1027 return ret; 1028 } 1029 1030 /** 1031 * ice_free_q_vector - Free memory allocated for a specific interrupt vector 1032 * @vsi: VSI having the memory freed 1033 * @v_idx: index of the vector to be freed 1034 */ 1035 static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) 1036 { 1037 struct ice_q_vector *q_vector; 1038 struct ice_pf *pf = vsi->back; 1039 struct ice_ring *ring; 1040 1041 if (!vsi->q_vectors[v_idx]) { 1042 dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n", 1043 v_idx); 1044 return; 1045 } 1046 q_vector = vsi->q_vectors[v_idx]; 1047 1048 ice_for_each_ring(ring, q_vector->tx) 1049 ring->q_vector = NULL; 1050 ice_for_each_ring(ring, q_vector->rx) 1051 ring->q_vector = NULL; 1052 1053 /* only VSI with an associated netdev is set up with NAPI */ 1054 if (vsi->netdev) 1055 netif_napi_del(&q_vector->napi); 1056 1057 devm_kfree(&pf->pdev->dev, q_vector); 1058 vsi->q_vectors[v_idx] = NULL; 1059 } 1060 1061 /** 1062 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors 1063 * @vsi: the VSI having memory freed 1064 */ 1065 void ice_vsi_free_q_vectors(struct ice_vsi *vsi) 1066 { 1067 int v_idx; 1068 1069 ice_for_each_q_vector(vsi, v_idx) 1070 ice_free_q_vector(vsi, v_idx); 1071 } 1072 1073 /** 1074 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector 1075 * @vsi: the VSI being configured 1076 * @v_idx: index of the vector in the VSI struct 1077 * 1078 * We allocate one q_vector. If allocation fails we return -ENOMEM. 1079 */ 1080 static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) 1081 { 1082 struct ice_pf *pf = vsi->back; 1083 struct ice_q_vector *q_vector; 1084 1085 /* allocate q_vector */ 1086 q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL); 1087 if (!q_vector) 1088 return -ENOMEM; 1089 1090 q_vector->vsi = vsi; 1091 q_vector->v_idx = v_idx; 1092 if (vsi->type == ICE_VSI_VF) 1093 goto out; 1094 /* only set affinity_mask if the CPU is online */ 1095 if (cpu_online(v_idx)) 1096 cpumask_set_cpu(v_idx, &q_vector->affinity_mask); 1097 1098 /* This will not be called in the driver load path because the netdev 1099 * will not be created yet. All other cases with register the NAPI 1100 * handler here (i.e. resume, reset/rebuild, etc.) 1101 */ 1102 if (vsi->netdev) 1103 netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, 1104 NAPI_POLL_WEIGHT); 1105 1106 out: 1107 /* tie q_vector and VSI together */ 1108 vsi->q_vectors[v_idx] = q_vector; 1109 1110 return 0; 1111 } 1112 1113 /** 1114 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors 1115 * @vsi: the VSI being configured 1116 * 1117 * We allocate one q_vector per queue interrupt. If allocation fails we 1118 * return -ENOMEM. 1119 */ 1120 static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) 1121 { 1122 struct ice_pf *pf = vsi->back; 1123 int v_idx = 0, num_q_vectors; 1124 int err; 1125 1126 if (vsi->q_vectors[0]) { 1127 dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n", 1128 vsi->vsi_num); 1129 return -EEXIST; 1130 } 1131 1132 num_q_vectors = vsi->num_q_vectors; 1133 1134 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 1135 err = ice_vsi_alloc_q_vector(vsi, v_idx); 1136 if (err) 1137 goto err_out; 1138 } 1139 1140 return 0; 1141 1142 err_out: 1143 while (v_idx--) 1144 ice_free_q_vector(vsi, v_idx); 1145 1146 dev_err(&pf->pdev->dev, 1147 "Failed to allocate %d q_vector for VSI %d, ret=%d\n", 1148 vsi->num_q_vectors, vsi->vsi_num, err); 1149 vsi->num_q_vectors = 0; 1150 return err; 1151 } 1152 1153 /** 1154 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI 1155 * @vsi: ptr to the VSI 1156 * 1157 * This should only be called after ice_vsi_alloc() which allocates the 1158 * corresponding SW VSI structure and initializes num_queue_pairs for the 1159 * newly allocated VSI. 1160 * 1161 * Returns 0 on success or negative on failure 1162 */ 1163 static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) 1164 { 1165 struct ice_pf *pf = vsi->back; 1166 u16 num_q_vectors; 1167 1168 /* SRIOV doesn't grab irq_tracker entries for each VSI */ 1169 if (vsi->type == ICE_VSI_VF) 1170 return 0; 1171 1172 if (vsi->base_vector) { 1173 dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", 1174 vsi->vsi_num, vsi->base_vector); 1175 return -EEXIST; 1176 } 1177 1178 num_q_vectors = vsi->num_q_vectors; 1179 /* reserve slots from OS requested IRQs */ 1180 vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors, 1181 vsi->idx); 1182 if (vsi->base_vector < 0) { 1183 dev_err(&pf->pdev->dev, 1184 "Failed to get tracking for %d vectors for VSI %d, err=%d\n", 1185 num_q_vectors, vsi->vsi_num, vsi->base_vector); 1186 return -ENOENT; 1187 } 1188 pf->num_avail_sw_msix -= num_q_vectors; 1189 1190 return 0; 1191 } 1192 1193 /** 1194 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI 1195 * @vsi: the VSI having rings deallocated 1196 */ 1197 static void ice_vsi_clear_rings(struct ice_vsi *vsi) 1198 { 1199 int i; 1200 1201 if (vsi->tx_rings) { 1202 for (i = 0; i < vsi->alloc_txq; i++) { 1203 if (vsi->tx_rings[i]) { 1204 kfree_rcu(vsi->tx_rings[i], rcu); 1205 vsi->tx_rings[i] = NULL; 1206 } 1207 } 1208 } 1209 if (vsi->rx_rings) { 1210 for (i = 0; i < vsi->alloc_rxq; i++) { 1211 if (vsi->rx_rings[i]) { 1212 kfree_rcu(vsi->rx_rings[i], rcu); 1213 vsi->rx_rings[i] = NULL; 1214 } 1215 } 1216 } 1217 } 1218 1219 /** 1220 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI 1221 * @vsi: VSI which is having rings allocated 1222 */ 1223 static int ice_vsi_alloc_rings(struct ice_vsi *vsi) 1224 { 1225 struct ice_pf *pf = vsi->back; 1226 int i; 1227 1228 /* Allocate Tx rings */ 1229 for (i = 0; i < vsi->alloc_txq; i++) { 1230 struct ice_ring *ring; 1231 1232 /* allocate with kzalloc(), free with kfree_rcu() */ 1233 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1234 1235 if (!ring) 1236 goto err_out; 1237 1238 ring->q_index = i; 1239 ring->reg_idx = vsi->txq_map[i]; 1240 ring->ring_active = false; 1241 ring->vsi = vsi; 1242 ring->dev = &pf->pdev->dev; 1243 ring->count = vsi->num_tx_desc; 1244 vsi->tx_rings[i] = ring; 1245 } 1246 1247 /* Allocate Rx rings */ 1248 for (i = 0; i < vsi->alloc_rxq; i++) { 1249 struct ice_ring *ring; 1250 1251 /* allocate with kzalloc(), free with kfree_rcu() */ 1252 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1253 if (!ring) 1254 goto err_out; 1255 1256 ring->q_index = i; 1257 ring->reg_idx = vsi->rxq_map[i]; 1258 ring->ring_active = false; 1259 ring->vsi = vsi; 1260 ring->netdev = vsi->netdev; 1261 ring->dev = &pf->pdev->dev; 1262 ring->count = vsi->num_rx_desc; 1263 vsi->rx_rings[i] = ring; 1264 } 1265 1266 return 0; 1267 1268 err_out: 1269 ice_vsi_clear_rings(vsi); 1270 return -ENOMEM; 1271 } 1272 1273 /** 1274 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors 1275 * @vsi: the VSI being configured 1276 * 1277 * This function maps descriptor rings to the queue-specific vectors allotted 1278 * through the MSI-X enabling code. On a constrained vector budget, we map Tx 1279 * and Rx rings to the vector as "efficiently" as possible. 1280 */ 1281 #ifdef CONFIG_DCB 1282 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) 1283 #else 1284 static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) 1285 #endif /* CONFIG_DCB */ 1286 { 1287 int q_vectors = vsi->num_q_vectors; 1288 int tx_rings_rem, rx_rings_rem; 1289 int v_id; 1290 1291 /* initially assigning remaining rings count to VSIs num queue value */ 1292 tx_rings_rem = vsi->num_txq; 1293 rx_rings_rem = vsi->num_rxq; 1294 1295 for (v_id = 0; v_id < q_vectors; v_id++) { 1296 struct ice_q_vector *q_vector = vsi->q_vectors[v_id]; 1297 int tx_rings_per_v, rx_rings_per_v, q_id, q_base; 1298 1299 /* Tx rings mapping to vector */ 1300 tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); 1301 q_vector->num_ring_tx = tx_rings_per_v; 1302 q_vector->tx.ring = NULL; 1303 q_vector->tx.itr_idx = ICE_TX_ITR; 1304 q_base = vsi->num_txq - tx_rings_rem; 1305 1306 for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { 1307 struct ice_ring *tx_ring = vsi->tx_rings[q_id]; 1308 1309 tx_ring->q_vector = q_vector; 1310 tx_ring->next = q_vector->tx.ring; 1311 q_vector->tx.ring = tx_ring; 1312 } 1313 tx_rings_rem -= tx_rings_per_v; 1314 1315 /* Rx rings mapping to vector */ 1316 rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); 1317 q_vector->num_ring_rx = rx_rings_per_v; 1318 q_vector->rx.ring = NULL; 1319 q_vector->rx.itr_idx = ICE_RX_ITR; 1320 q_base = vsi->num_rxq - rx_rings_rem; 1321 1322 for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { 1323 struct ice_ring *rx_ring = vsi->rx_rings[q_id]; 1324 1325 rx_ring->q_vector = q_vector; 1326 rx_ring->next = q_vector->rx.ring; 1327 q_vector->rx.ring = rx_ring; 1328 } 1329 rx_rings_rem -= rx_rings_per_v; 1330 } 1331 } 1332 1333 /** 1334 * ice_vsi_manage_rss_lut - disable/enable RSS 1335 * @vsi: the VSI being changed 1336 * @ena: boolean value indicating if this is an enable or disable request 1337 * 1338 * In the event of disable request for RSS, this function will zero out RSS 1339 * LUT, while in the event of enable request for RSS, it will reconfigure RSS 1340 * LUT. 1341 */ 1342 int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) 1343 { 1344 int err = 0; 1345 u8 *lut; 1346 1347 lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size, 1348 GFP_KERNEL); 1349 if (!lut) 1350 return -ENOMEM; 1351 1352 if (ena) { 1353 if (vsi->rss_lut_user) 1354 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); 1355 else 1356 ice_fill_rss_lut(lut, vsi->rss_table_size, 1357 vsi->rss_size); 1358 } 1359 1360 err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size); 1361 devm_kfree(&vsi->back->pdev->dev, lut); 1362 return err; 1363 } 1364 1365 /** 1366 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI 1367 * @vsi: VSI to be configured 1368 */ 1369 static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) 1370 { 1371 struct ice_aqc_get_set_rss_keys *key; 1372 struct ice_pf *pf = vsi->back; 1373 enum ice_status status; 1374 int err = 0; 1375 u8 *lut; 1376 1377 vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq); 1378 1379 lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL); 1380 if (!lut) 1381 return -ENOMEM; 1382 1383 if (vsi->rss_lut_user) 1384 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); 1385 else 1386 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); 1387 1388 status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut, 1389 vsi->rss_table_size); 1390 1391 if (status) { 1392 dev_err(&pf->pdev->dev, 1393 "set_rss_lut failed, error %d\n", status); 1394 err = -EIO; 1395 goto ice_vsi_cfg_rss_exit; 1396 } 1397 1398 key = devm_kzalloc(&pf->pdev->dev, sizeof(*key), GFP_KERNEL); 1399 if (!key) { 1400 err = -ENOMEM; 1401 goto ice_vsi_cfg_rss_exit; 1402 } 1403 1404 if (vsi->rss_hkey_user) 1405 memcpy(key, 1406 (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user, 1407 ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); 1408 else 1409 netdev_rss_key_fill((void *)key, 1410 ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); 1411 1412 status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key); 1413 1414 if (status) { 1415 dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n", 1416 status); 1417 err = -EIO; 1418 } 1419 1420 devm_kfree(&pf->pdev->dev, key); 1421 ice_vsi_cfg_rss_exit: 1422 devm_kfree(&pf->pdev->dev, lut); 1423 return err; 1424 } 1425 1426 /** 1427 * ice_add_mac_to_list - Add a MAC address filter entry to the list 1428 * @vsi: the VSI to be forwarded to 1429 * @add_list: pointer to the list which contains MAC filter entries 1430 * @macaddr: the MAC address to be added. 1431 * 1432 * Adds MAC address filter entry to the temp list 1433 * 1434 * Returns 0 on success or ENOMEM on failure. 1435 */ 1436 int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, 1437 const u8 *macaddr) 1438 { 1439 struct ice_fltr_list_entry *tmp; 1440 struct ice_pf *pf = vsi->back; 1441 1442 tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC); 1443 if (!tmp) 1444 return -ENOMEM; 1445 1446 tmp->fltr_info.flag = ICE_FLTR_TX; 1447 tmp->fltr_info.src_id = ICE_SRC_ID_VSI; 1448 tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC; 1449 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; 1450 tmp->fltr_info.vsi_handle = vsi->idx; 1451 ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr); 1452 1453 INIT_LIST_HEAD(&tmp->list_entry); 1454 list_add(&tmp->list_entry, add_list); 1455 1456 return 0; 1457 } 1458 1459 /** 1460 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters 1461 * @vsi: the VSI to be updated 1462 */ 1463 void ice_update_eth_stats(struct ice_vsi *vsi) 1464 { 1465 struct ice_eth_stats *prev_es, *cur_es; 1466 struct ice_hw *hw = &vsi->back->hw; 1467 u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ 1468 1469 prev_es = &vsi->eth_stats_prev; 1470 cur_es = &vsi->eth_stats; 1471 1472 ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded, 1473 &prev_es->rx_bytes, &cur_es->rx_bytes); 1474 1475 ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded, 1476 &prev_es->rx_unicast, &cur_es->rx_unicast); 1477 1478 ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded, 1479 &prev_es->rx_multicast, &cur_es->rx_multicast); 1480 1481 ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded, 1482 &prev_es->rx_broadcast, &cur_es->rx_broadcast); 1483 1484 ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, 1485 &prev_es->rx_discards, &cur_es->rx_discards); 1486 1487 ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded, 1488 &prev_es->tx_bytes, &cur_es->tx_bytes); 1489 1490 ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded, 1491 &prev_es->tx_unicast, &cur_es->tx_unicast); 1492 1493 ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded, 1494 &prev_es->tx_multicast, &cur_es->tx_multicast); 1495 1496 ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded, 1497 &prev_es->tx_broadcast, &cur_es->tx_broadcast); 1498 1499 ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, 1500 &prev_es->tx_errors, &cur_es->tx_errors); 1501 1502 vsi->stat_offsets_loaded = true; 1503 } 1504 1505 /** 1506 * ice_free_fltr_list - free filter lists helper 1507 * @dev: pointer to the device struct 1508 * @h: pointer to the list head to be freed 1509 * 1510 * Helper function to free filter lists previously created using 1511 * ice_add_mac_to_list 1512 */ 1513 void ice_free_fltr_list(struct device *dev, struct list_head *h) 1514 { 1515 struct ice_fltr_list_entry *e, *tmp; 1516 1517 list_for_each_entry_safe(e, tmp, h, list_entry) { 1518 list_del(&e->list_entry); 1519 devm_kfree(dev, e); 1520 } 1521 } 1522 1523 /** 1524 * ice_vsi_add_vlan - Add VSI membership for given VLAN 1525 * @vsi: the VSI being configured 1526 * @vid: VLAN ID to be added 1527 */ 1528 int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) 1529 { 1530 struct ice_fltr_list_entry *tmp; 1531 struct ice_pf *pf = vsi->back; 1532 LIST_HEAD(tmp_add_list); 1533 enum ice_status status; 1534 int err = 0; 1535 1536 tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL); 1537 if (!tmp) 1538 return -ENOMEM; 1539 1540 tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 1541 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; 1542 tmp->fltr_info.flag = ICE_FLTR_TX; 1543 tmp->fltr_info.src_id = ICE_SRC_ID_VSI; 1544 tmp->fltr_info.vsi_handle = vsi->idx; 1545 tmp->fltr_info.l_data.vlan.vlan_id = vid; 1546 1547 INIT_LIST_HEAD(&tmp->list_entry); 1548 list_add(&tmp->list_entry, &tmp_add_list); 1549 1550 status = ice_add_vlan(&pf->hw, &tmp_add_list); 1551 if (status) { 1552 err = -ENODEV; 1553 dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n", 1554 vid, vsi->vsi_num); 1555 } 1556 1557 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); 1558 return err; 1559 } 1560 1561 /** 1562 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN 1563 * @vsi: the VSI being configured 1564 * @vid: VLAN ID to be removed 1565 * 1566 * Returns 0 on success and negative on failure 1567 */ 1568 int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) 1569 { 1570 struct ice_fltr_list_entry *list; 1571 struct ice_pf *pf = vsi->back; 1572 LIST_HEAD(tmp_add_list); 1573 enum ice_status status; 1574 int err = 0; 1575 1576 list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); 1577 if (!list) 1578 return -ENOMEM; 1579 1580 list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 1581 list->fltr_info.vsi_handle = vsi->idx; 1582 list->fltr_info.fltr_act = ICE_FWD_TO_VSI; 1583 list->fltr_info.l_data.vlan.vlan_id = vid; 1584 list->fltr_info.flag = ICE_FLTR_TX; 1585 list->fltr_info.src_id = ICE_SRC_ID_VSI; 1586 1587 INIT_LIST_HEAD(&list->list_entry); 1588 list_add(&list->list_entry, &tmp_add_list); 1589 1590 status = ice_remove_vlan(&pf->hw, &tmp_add_list); 1591 if (status == ICE_ERR_DOES_NOT_EXIST) { 1592 dev_dbg(&pf->pdev->dev, 1593 "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n", 1594 vid, vsi->vsi_num, status); 1595 } else if (status) { 1596 dev_err(&pf->pdev->dev, 1597 "Error removing VLAN %d on vsi %i error: %d\n", 1598 vid, vsi->vsi_num, status); 1599 err = -EIO; 1600 } 1601 1602 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); 1603 return err; 1604 } 1605 1606 /** 1607 * ice_vsi_cfg_rxqs - Configure the VSI for Rx 1608 * @vsi: the VSI being configured 1609 * 1610 * Return 0 on success and a negative value on error 1611 * Configure the Rx VSI for operation. 1612 */ 1613 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) 1614 { 1615 u16 i; 1616 1617 if (vsi->type == ICE_VSI_VF) 1618 goto setup_rings; 1619 1620 if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN) 1621 vsi->max_frame = vsi->netdev->mtu + 1622 ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 1623 else 1624 vsi->max_frame = ICE_RXBUF_2048; 1625 1626 vsi->rx_buf_len = ICE_RXBUF_2048; 1627 setup_rings: 1628 /* set up individual rings */ 1629 for (i = 0; i < vsi->num_rxq; i++) { 1630 int err; 1631 1632 err = ice_setup_rx_ctx(vsi->rx_rings[i]); 1633 if (err) { 1634 dev_err(&vsi->back->pdev->dev, 1635 "ice_setup_rx_ctx failed for RxQ %d, err %d\n", 1636 i, err); 1637 return err; 1638 } 1639 } 1640 1641 return 0; 1642 } 1643 1644 /** 1645 * ice_vsi_cfg_txqs - Configure the VSI for Tx 1646 * @vsi: the VSI being configured 1647 * @rings: Tx ring array to be configured 1648 * @offset: offset within vsi->txq_map 1649 * 1650 * Return 0 on success and a negative value on error 1651 * Configure the Tx VSI for operation. 1652 */ 1653 static int 1654 ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset) 1655 { 1656 struct ice_aqc_add_tx_qgrp *qg_buf; 1657 struct ice_aqc_add_txqs_perq *txq; 1658 struct ice_pf *pf = vsi->back; 1659 u8 num_q_grps, q_idx = 0; 1660 enum ice_status status; 1661 u16 buf_len, i, pf_q; 1662 int err = 0, tc; 1663 1664 buf_len = sizeof(*qg_buf); 1665 qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); 1666 if (!qg_buf) 1667 return -ENOMEM; 1668 1669 qg_buf->num_txqs = 1; 1670 num_q_grps = 1; 1671 1672 /* set up and configure the Tx queues for each enabled TC */ 1673 ice_for_each_traffic_class(tc) { 1674 if (!(vsi->tc_cfg.ena_tc & BIT(tc))) 1675 break; 1676 1677 for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { 1678 struct ice_tlan_ctx tlan_ctx = { 0 }; 1679 1680 pf_q = vsi->txq_map[q_idx + offset]; 1681 ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q); 1682 /* copy context contents into the qg_buf */ 1683 qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); 1684 ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, 1685 ice_tlan_ctx_info); 1686 1687 /* init queue specific tail reg. It is referred as 1688 * transmit comm scheduler queue doorbell. 1689 */ 1690 rings[q_idx]->tail = 1691 pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); 1692 status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, 1693 i, num_q_grps, qg_buf, 1694 buf_len, NULL); 1695 if (status) { 1696 dev_err(&pf->pdev->dev, 1697 "Failed to set LAN Tx queue context, error: %d\n", 1698 status); 1699 err = -ENODEV; 1700 goto err_cfg_txqs; 1701 } 1702 1703 /* Add Tx Queue TEID into the VSI Tx ring from the 1704 * response. This will complete configuring and 1705 * enabling the queue. 1706 */ 1707 txq = &qg_buf->txqs[0]; 1708 if (pf_q == le16_to_cpu(txq->txq_id)) 1709 rings[q_idx]->txq_teid = 1710 le32_to_cpu(txq->q_teid); 1711 1712 q_idx++; 1713 } 1714 } 1715 err_cfg_txqs: 1716 devm_kfree(&pf->pdev->dev, qg_buf); 1717 return err; 1718 } 1719 1720 /** 1721 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx 1722 * @vsi: the VSI being configured 1723 * 1724 * Return 0 on success and a negative value on error 1725 * Configure the Tx VSI for operation. 1726 */ 1727 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi) 1728 { 1729 return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0); 1730 } 1731 1732 /** 1733 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value 1734 * @intrl: interrupt rate limit in usecs 1735 * @gran: interrupt rate limit granularity in usecs 1736 * 1737 * This function converts a decimal interrupt rate limit in usecs to the format 1738 * expected by firmware. 1739 */ 1740 u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) 1741 { 1742 u32 val = intrl / gran; 1743 1744 if (val) 1745 return val | GLINT_RATE_INTRL_ENA_M; 1746 return 0; 1747 } 1748 1749 /** 1750 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set 1751 * @hw: board specific structure 1752 */ 1753 static void ice_cfg_itr_gran(struct ice_hw *hw) 1754 { 1755 u32 regval = rd32(hw, GLINT_CTL); 1756 1757 /* no need to update global register if ITR gran is already set */ 1758 if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) && 1759 (((regval & GLINT_CTL_ITR_GRAN_200_M) >> 1760 GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) && 1761 (((regval & GLINT_CTL_ITR_GRAN_100_M) >> 1762 GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) && 1763 (((regval & GLINT_CTL_ITR_GRAN_50_M) >> 1764 GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) && 1765 (((regval & GLINT_CTL_ITR_GRAN_25_M) >> 1766 GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US)) 1767 return; 1768 1769 regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) & 1770 GLINT_CTL_ITR_GRAN_200_M) | 1771 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) & 1772 GLINT_CTL_ITR_GRAN_100_M) | 1773 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) & 1774 GLINT_CTL_ITR_GRAN_50_M) | 1775 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) & 1776 GLINT_CTL_ITR_GRAN_25_M); 1777 wr32(hw, GLINT_CTL, regval); 1778 } 1779 1780 /** 1781 * ice_cfg_itr - configure the initial interrupt throttle values 1782 * @hw: pointer to the HW structure 1783 * @q_vector: interrupt vector that's being configured 1784 * 1785 * Configure interrupt throttling values for the ring containers that are 1786 * associated with the interrupt vector passed in. 1787 */ 1788 static void 1789 ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector) 1790 { 1791 ice_cfg_itr_gran(hw); 1792 1793 if (q_vector->num_ring_rx) { 1794 struct ice_ring_container *rc = &q_vector->rx; 1795 1796 /* if this value is set then don't overwrite with default */ 1797 if (!rc->itr_setting) 1798 rc->itr_setting = ICE_DFLT_RX_ITR; 1799 1800 rc->target_itr = ITR_TO_REG(rc->itr_setting); 1801 rc->next_update = jiffies + 1; 1802 rc->current_itr = rc->target_itr; 1803 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), 1804 ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); 1805 } 1806 1807 if (q_vector->num_ring_tx) { 1808 struct ice_ring_container *rc = &q_vector->tx; 1809 1810 /* if this value is set then don't overwrite with default */ 1811 if (!rc->itr_setting) 1812 rc->itr_setting = ICE_DFLT_TX_ITR; 1813 1814 rc->target_itr = ITR_TO_REG(rc->itr_setting); 1815 rc->next_update = jiffies + 1; 1816 rc->current_itr = rc->target_itr; 1817 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), 1818 ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); 1819 } 1820 } 1821 1822 /** 1823 * ice_cfg_txq_interrupt - configure interrupt on Tx queue 1824 * @vsi: the VSI being configured 1825 * @txq: Tx queue being mapped to MSI-X vector 1826 * @msix_idx: MSI-X vector index within the function 1827 * @itr_idx: ITR index of the interrupt cause 1828 * 1829 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector 1830 * within the function space. 1831 */ 1832 #ifdef CONFIG_PCI_IOV 1833 void 1834 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx) 1835 #else 1836 static void 1837 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx) 1838 #endif /* CONFIG_PCI_IOV */ 1839 { 1840 struct ice_pf *pf = vsi->back; 1841 struct ice_hw *hw = &pf->hw; 1842 u32 val; 1843 1844 itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M; 1845 1846 val = QINT_TQCTL_CAUSE_ENA_M | itr_idx | 1847 ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M); 1848 1849 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); 1850 } 1851 1852 /** 1853 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue 1854 * @vsi: the VSI being configured 1855 * @rxq: Rx queue being mapped to MSI-X vector 1856 * @msix_idx: MSI-X vector index within the function 1857 * @itr_idx: ITR index of the interrupt cause 1858 * 1859 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector 1860 * within the function space. 1861 */ 1862 #ifdef CONFIG_PCI_IOV 1863 void 1864 ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx) 1865 #else 1866 static void 1867 ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx) 1868 #endif /* CONFIG_PCI_IOV */ 1869 { 1870 struct ice_pf *pf = vsi->back; 1871 struct ice_hw *hw = &pf->hw; 1872 u32 val; 1873 1874 itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M; 1875 1876 val = QINT_RQCTL_CAUSE_ENA_M | itr_idx | 1877 ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M); 1878 1879 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); 1880 1881 ice_flush(hw); 1882 } 1883 1884 /** 1885 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW 1886 * @vsi: the VSI being configured 1887 * 1888 * This configures MSIX mode interrupts for the PF VSI, and should not be used 1889 * for the VF VSI. 1890 */ 1891 void ice_vsi_cfg_msix(struct ice_vsi *vsi) 1892 { 1893 struct ice_pf *pf = vsi->back; 1894 struct ice_hw *hw = &pf->hw; 1895 u32 txq = 0, rxq = 0; 1896 int i, q; 1897 1898 for (i = 0; i < vsi->num_q_vectors; i++) { 1899 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 1900 u16 reg_idx = q_vector->reg_idx; 1901 1902 ice_cfg_itr(hw, q_vector); 1903 1904 wr32(hw, GLINT_RATE(reg_idx), 1905 ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); 1906 1907 /* Both Transmit Queue Interrupt Cause Control register 1908 * and Receive Queue Interrupt Cause control register 1909 * expects MSIX_INDX field to be the vector index 1910 * within the function space and not the absolute 1911 * vector index across PF or across device. 1912 * For SR-IOV VF VSIs queue vector index always starts 1913 * with 1 since first vector index(0) is used for OICR 1914 * in VF space. Since VMDq and other PF VSIs are within 1915 * the PF function space, use the vector index that is 1916 * tracked for this PF. 1917 */ 1918 for (q = 0; q < q_vector->num_ring_tx; q++) { 1919 ice_cfg_txq_interrupt(vsi, txq, reg_idx, 1920 q_vector->tx.itr_idx); 1921 txq++; 1922 } 1923 1924 for (q = 0; q < q_vector->num_ring_rx; q++) { 1925 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx, 1926 q_vector->rx.itr_idx); 1927 rxq++; 1928 } 1929 } 1930 } 1931 1932 /** 1933 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx 1934 * @vsi: the VSI being changed 1935 */ 1936 int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) 1937 { 1938 struct device *dev = &vsi->back->pdev->dev; 1939 struct ice_hw *hw = &vsi->back->hw; 1940 struct ice_vsi_ctx *ctxt; 1941 enum ice_status status; 1942 int ret = 0; 1943 1944 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); 1945 if (!ctxt) 1946 return -ENOMEM; 1947 1948 /* Here we are configuring the VSI to let the driver add VLAN tags by 1949 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag 1950 * insertion happens in the Tx hot path, in ice_tx_map. 1951 */ 1952 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; 1953 1954 /* Preserve existing VLAN strip setting */ 1955 ctxt->info.vlan_flags |= (vsi->info.vlan_flags & 1956 ICE_AQ_VSI_VLAN_EMOD_M); 1957 1958 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 1959 1960 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 1961 if (status) { 1962 dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", 1963 status, hw->adminq.sq_last_status); 1964 ret = -EIO; 1965 goto out; 1966 } 1967 1968 vsi->info.vlan_flags = ctxt->info.vlan_flags; 1969 out: 1970 devm_kfree(dev, ctxt); 1971 return ret; 1972 } 1973 1974 /** 1975 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx 1976 * @vsi: the VSI being changed 1977 * @ena: boolean value indicating if this is a enable or disable request 1978 */ 1979 int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) 1980 { 1981 struct device *dev = &vsi->back->pdev->dev; 1982 struct ice_hw *hw = &vsi->back->hw; 1983 struct ice_vsi_ctx *ctxt; 1984 enum ice_status status; 1985 int ret = 0; 1986 1987 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); 1988 if (!ctxt) 1989 return -ENOMEM; 1990 1991 /* Here we are configuring what the VSI should do with the VLAN tag in 1992 * the Rx packet. We can either leave the tag in the packet or put it in 1993 * the Rx descriptor. 1994 */ 1995 if (ena) 1996 /* Strip VLAN tag from Rx packet and put it in the desc */ 1997 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; 1998 else 1999 /* Disable stripping. Leave tag in packet */ 2000 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; 2001 2002 /* Allow all packets untagged/tagged */ 2003 ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; 2004 2005 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 2006 2007 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 2008 if (status) { 2009 dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n", 2010 ena, status, hw->adminq.sq_last_status); 2011 ret = -EIO; 2012 goto out; 2013 } 2014 2015 vsi->info.vlan_flags = ctxt->info.vlan_flags; 2016 out: 2017 devm_kfree(dev, ctxt); 2018 return ret; 2019 } 2020 2021 /** 2022 * ice_vsi_start_rx_rings - start VSI's Rx rings 2023 * @vsi: the VSI whose rings are to be started 2024 * 2025 * Returns 0 on success and a negative value on error 2026 */ 2027 int ice_vsi_start_rx_rings(struct ice_vsi *vsi) 2028 { 2029 return ice_vsi_ctrl_rx_rings(vsi, true); 2030 } 2031 2032 /** 2033 * ice_vsi_stop_rx_rings - stop VSI's Rx rings 2034 * @vsi: the VSI 2035 * 2036 * Returns 0 on success and a negative value on error 2037 */ 2038 int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) 2039 { 2040 return ice_vsi_ctrl_rx_rings(vsi, false); 2041 } 2042 2043 /** 2044 * ice_trigger_sw_intr - trigger a software interrupt 2045 * @hw: pointer to the HW structure 2046 * @q_vector: interrupt vector to trigger the software interrupt for 2047 */ 2048 void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector) 2049 { 2050 wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 2051 (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) | 2052 GLINT_DYN_CTL_SWINT_TRIG_M | 2053 GLINT_DYN_CTL_INTENA_M); 2054 } 2055 2056 /** 2057 * ice_vsi_stop_tx_rings - Disable Tx rings 2058 * @vsi: the VSI being configured 2059 * @rst_src: reset source 2060 * @rel_vmvf_num: Relative ID of VF/VM 2061 * @rings: Tx ring array to be stopped 2062 * @offset: offset within vsi->txq_map 2063 */ 2064 static int 2065 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2066 u16 rel_vmvf_num, struct ice_ring **rings, int offset) 2067 { 2068 struct ice_pf *pf = vsi->back; 2069 struct ice_hw *hw = &pf->hw; 2070 int tc, q_idx = 0, err = 0; 2071 u16 *q_ids, *q_handles, i; 2072 enum ice_status status; 2073 u32 *q_teids, val; 2074 2075 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) 2076 return -EINVAL; 2077 2078 q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), 2079 GFP_KERNEL); 2080 if (!q_teids) 2081 return -ENOMEM; 2082 2083 q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), 2084 GFP_KERNEL); 2085 if (!q_ids) { 2086 err = -ENOMEM; 2087 goto err_alloc_q_ids; 2088 } 2089 2090 q_handles = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, 2091 sizeof(*q_handles), GFP_KERNEL); 2092 if (!q_handles) { 2093 err = -ENOMEM; 2094 goto err_alloc_q_handles; 2095 } 2096 2097 /* set up the Tx queue list to be disabled for each enabled TC */ 2098 ice_for_each_traffic_class(tc) { 2099 if (!(vsi->tc_cfg.ena_tc & BIT(tc))) 2100 break; 2101 2102 for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { 2103 struct ice_q_vector *q_vector; 2104 2105 if (!rings || !rings[q_idx]) { 2106 err = -EINVAL; 2107 goto err_out; 2108 } 2109 2110 q_ids[i] = vsi->txq_map[q_idx + offset]; 2111 q_teids[i] = rings[q_idx]->txq_teid; 2112 q_handles[i] = i; 2113 2114 /* clear cause_ena bit for disabled queues */ 2115 val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx)); 2116 val &= ~QINT_TQCTL_CAUSE_ENA_M; 2117 wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val); 2118 2119 /* software is expected to wait for 100 ns */ 2120 ndelay(100); 2121 2122 /* trigger a software interrupt for the vector 2123 * associated to the queue to schedule NAPI handler 2124 */ 2125 q_vector = rings[i]->q_vector; 2126 if (q_vector) 2127 ice_trigger_sw_intr(hw, q_vector); 2128 2129 q_idx++; 2130 } 2131 status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc, 2132 vsi->num_txq, q_handles, q_ids, 2133 q_teids, rst_src, rel_vmvf_num, NULL); 2134 2135 /* if the disable queue command was exercised during an active 2136 * reset flow, ICE_ERR_RESET_ONGOING is returned. This is not 2137 * an error as the reset operation disables queues at the 2138 * hardware level anyway. 2139 */ 2140 if (status == ICE_ERR_RESET_ONGOING) { 2141 dev_dbg(&pf->pdev->dev, 2142 "Reset in progress. LAN Tx queues already disabled\n"); 2143 } else if (status == ICE_ERR_DOES_NOT_EXIST) { 2144 dev_dbg(&pf->pdev->dev, 2145 "LAN Tx queues does not exist, nothing to disabled\n"); 2146 } else if (status) { 2147 dev_err(&pf->pdev->dev, 2148 "Failed to disable LAN Tx queues, error: %d\n", 2149 status); 2150 err = -ENODEV; 2151 } 2152 } 2153 2154 err_out: 2155 devm_kfree(&pf->pdev->dev, q_handles); 2156 2157 err_alloc_q_handles: 2158 devm_kfree(&pf->pdev->dev, q_ids); 2159 2160 err_alloc_q_ids: 2161 devm_kfree(&pf->pdev->dev, q_teids); 2162 2163 return err; 2164 } 2165 2166 /** 2167 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings 2168 * @vsi: the VSI being configured 2169 * @rst_src: reset source 2170 * @rel_vmvf_num: Relative ID of VF/VM 2171 */ 2172 int 2173 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2174 u16 rel_vmvf_num) 2175 { 2176 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, 2177 0); 2178 } 2179 2180 /** 2181 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI 2182 * @vsi: VSI to enable or disable VLAN pruning on 2183 * @ena: set to true to enable VLAN pruning and false to disable it 2184 * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode 2185 * 2186 * returns 0 if VSI is updated, negative otherwise 2187 */ 2188 int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) 2189 { 2190 struct ice_vsi_ctx *ctxt; 2191 struct device *dev; 2192 struct ice_pf *pf; 2193 int status; 2194 2195 if (!vsi) 2196 return -EINVAL; 2197 2198 pf = vsi->back; 2199 dev = &pf->pdev->dev; 2200 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); 2201 if (!ctxt) 2202 return -ENOMEM; 2203 2204 ctxt->info = vsi->info; 2205 2206 if (ena) { 2207 ctxt->info.sec_flags |= 2208 ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 2209 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S; 2210 ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 2211 } else { 2212 ctxt->info.sec_flags &= 2213 ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 2214 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 2215 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 2216 } 2217 2218 if (!vlan_promisc) 2219 ctxt->info.valid_sections = 2220 cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID | 2221 ICE_AQ_VSI_PROP_SW_VALID); 2222 2223 status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL); 2224 if (status) { 2225 netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n", 2226 ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status, 2227 pf->hw.adminq.sq_last_status); 2228 goto err_out; 2229 } 2230 2231 vsi->info.sec_flags = ctxt->info.sec_flags; 2232 vsi->info.sw_flags2 = ctxt->info.sw_flags2; 2233 2234 devm_kfree(dev, ctxt); 2235 return 0; 2236 2237 err_out: 2238 devm_kfree(dev, ctxt); 2239 return -EIO; 2240 } 2241 2242 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) 2243 { 2244 struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg; 2245 2246 vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg); 2247 vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg); 2248 } 2249 2250 /** 2251 * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors 2252 * @vsi: VSI to set the q_vectors register index on 2253 */ 2254 static int 2255 ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi) 2256 { 2257 u16 i; 2258 2259 if (!vsi || !vsi->q_vectors) 2260 return -EINVAL; 2261 2262 ice_for_each_q_vector(vsi, i) { 2263 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2264 2265 if (!q_vector) { 2266 dev_err(&vsi->back->pdev->dev, 2267 "Failed to set reg_idx on q_vector %d VSI %d\n", 2268 i, vsi->vsi_num); 2269 goto clear_reg_idx; 2270 } 2271 2272 if (vsi->type == ICE_VSI_VF) { 2273 struct ice_vf *vf = &vsi->back->vf[vsi->vf_id]; 2274 2275 q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector); 2276 } else { 2277 q_vector->reg_idx = 2278 q_vector->v_idx + vsi->base_vector; 2279 } 2280 } 2281 2282 return 0; 2283 2284 clear_reg_idx: 2285 ice_for_each_q_vector(vsi, i) { 2286 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2287 2288 if (q_vector) 2289 q_vector->reg_idx = 0; 2290 } 2291 2292 return -EINVAL; 2293 } 2294 2295 /** 2296 * ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule 2297 * @vsi: the VSI being configured 2298 * @add_rule: boolean value to add or remove ethertype filter rule 2299 */ 2300 static void 2301 ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule) 2302 { 2303 struct ice_fltr_list_entry *list; 2304 struct ice_pf *pf = vsi->back; 2305 LIST_HEAD(tmp_add_list); 2306 enum ice_status status; 2307 2308 list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); 2309 if (!list) 2310 return; 2311 2312 list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; 2313 list->fltr_info.fltr_act = ICE_DROP_PACKET; 2314 list->fltr_info.flag = ICE_FLTR_TX; 2315 list->fltr_info.src_id = ICE_SRC_ID_VSI; 2316 list->fltr_info.vsi_handle = vsi->idx; 2317 list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype; 2318 2319 INIT_LIST_HEAD(&list->list_entry); 2320 list_add(&list->list_entry, &tmp_add_list); 2321 2322 if (add_rule) 2323 status = ice_add_eth_mac(&pf->hw, &tmp_add_list); 2324 else 2325 status = ice_remove_eth_mac(&pf->hw, &tmp_add_list); 2326 2327 if (status) 2328 dev_err(&pf->pdev->dev, 2329 "Failure Adding or Removing Ethertype on VSI %i error: %d\n", 2330 vsi->vsi_num, status); 2331 2332 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); 2333 } 2334 2335 /** 2336 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling 2337 * @vsi: the VSI being configured 2338 * @tx: bool to determine Tx or Rx rule 2339 * @create: bool to determine create or remove Rule 2340 */ 2341 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) 2342 { 2343 struct ice_fltr_list_entry *list; 2344 struct ice_pf *pf = vsi->back; 2345 LIST_HEAD(tmp_add_list); 2346 enum ice_status status; 2347 2348 list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); 2349 if (!list) 2350 return; 2351 2352 list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; 2353 list->fltr_info.vsi_handle = vsi->idx; 2354 list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP; 2355 2356 if (tx) { 2357 list->fltr_info.fltr_act = ICE_DROP_PACKET; 2358 list->fltr_info.flag = ICE_FLTR_TX; 2359 list->fltr_info.src_id = ICE_SRC_ID_VSI; 2360 } else { 2361 list->fltr_info.fltr_act = ICE_FWD_TO_VSI; 2362 list->fltr_info.flag = ICE_FLTR_RX; 2363 list->fltr_info.src_id = ICE_SRC_ID_LPORT; 2364 } 2365 2366 INIT_LIST_HEAD(&list->list_entry); 2367 list_add(&list->list_entry, &tmp_add_list); 2368 2369 if (create) 2370 status = ice_add_eth_mac(&pf->hw, &tmp_add_list); 2371 else 2372 status = ice_remove_eth_mac(&pf->hw, &tmp_add_list); 2373 2374 if (status) 2375 dev_err(&pf->pdev->dev, 2376 "Fail %s %s LLDP rule on VSI %i error: %d\n", 2377 create ? "adding" : "removing", tx ? "TX" : "RX", 2378 vsi->vsi_num, status); 2379 2380 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); 2381 } 2382 2383 /** 2384 * ice_vsi_setup - Set up a VSI by a given type 2385 * @pf: board private structure 2386 * @pi: pointer to the port_info instance 2387 * @type: VSI type 2388 * @vf_id: defines VF ID to which this VSI connects. This field is meant to be 2389 * used only for ICE_VSI_VF VSI type. For other VSI types, should 2390 * fill-in ICE_INVAL_VFID as input. 2391 * 2392 * This allocates the sw VSI structure and its queue resources. 2393 * 2394 * Returns pointer to the successfully allocated and configured VSI sw struct on 2395 * success, NULL on failure. 2396 */ 2397 struct ice_vsi * 2398 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, 2399 enum ice_vsi_type type, u16 vf_id) 2400 { 2401 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2402 struct device *dev = &pf->pdev->dev; 2403 enum ice_status status; 2404 struct ice_vsi *vsi; 2405 int ret, i; 2406 2407 if (type == ICE_VSI_VF) 2408 vsi = ice_vsi_alloc(pf, type, vf_id); 2409 else 2410 vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID); 2411 2412 if (!vsi) { 2413 dev_err(dev, "could not allocate VSI\n"); 2414 return NULL; 2415 } 2416 2417 vsi->port_info = pi; 2418 vsi->vsw = pf->first_sw; 2419 if (vsi->type == ICE_VSI_PF) 2420 vsi->ethtype = ETH_P_PAUSE; 2421 2422 if (vsi->type == ICE_VSI_VF) 2423 vsi->vf_id = vf_id; 2424 2425 if (ice_vsi_get_qs(vsi)) { 2426 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", 2427 vsi->idx); 2428 goto unroll_get_qs; 2429 } 2430 2431 /* set RSS capabilities */ 2432 ice_vsi_set_rss_params(vsi); 2433 2434 /* set TC configuration */ 2435 ice_vsi_set_tc_cfg(vsi); 2436 2437 /* create the VSI */ 2438 ret = ice_vsi_init(vsi); 2439 if (ret) 2440 goto unroll_get_qs; 2441 2442 switch (vsi->type) { 2443 case ICE_VSI_PF: 2444 ret = ice_vsi_alloc_q_vectors(vsi); 2445 if (ret) 2446 goto unroll_vsi_init; 2447 2448 ret = ice_vsi_setup_vector_base(vsi); 2449 if (ret) 2450 goto unroll_alloc_q_vector; 2451 2452 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 2453 if (ret) 2454 goto unroll_vector_base; 2455 2456 ret = ice_vsi_alloc_rings(vsi); 2457 if (ret) 2458 goto unroll_vector_base; 2459 2460 ice_vsi_map_rings_to_vectors(vsi); 2461 2462 /* Do not exit if configuring RSS had an issue, at least 2463 * receive traffic on first queue. Hence no need to capture 2464 * return value 2465 */ 2466 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2467 ice_vsi_cfg_rss_lut_key(vsi); 2468 break; 2469 case ICE_VSI_VF: 2470 /* VF driver will take care of creating netdev for this type and 2471 * map queues to vectors through Virtchnl, PF driver only 2472 * creates a VSI and corresponding structures for bookkeeping 2473 * purpose 2474 */ 2475 ret = ice_vsi_alloc_q_vectors(vsi); 2476 if (ret) 2477 goto unroll_vsi_init; 2478 2479 ret = ice_vsi_alloc_rings(vsi); 2480 if (ret) 2481 goto unroll_alloc_q_vector; 2482 2483 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 2484 if (ret) 2485 goto unroll_vector_base; 2486 2487 pf->q_left_tx -= vsi->alloc_txq; 2488 pf->q_left_rx -= vsi->alloc_rxq; 2489 2490 /* Do not exit if configuring RSS had an issue, at least 2491 * receive traffic on first queue. Hence no need to capture 2492 * return value 2493 */ 2494 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2495 ice_vsi_cfg_rss_lut_key(vsi); 2496 break; 2497 case ICE_VSI_LB: 2498 ret = ice_vsi_alloc_rings(vsi); 2499 if (ret) 2500 goto unroll_vsi_init; 2501 break; 2502 default: 2503 /* clean up the resources and exit */ 2504 goto unroll_vsi_init; 2505 } 2506 2507 /* configure VSI nodes based on number of queues and TC's */ 2508 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2509 max_txqs[i] = vsi->alloc_txq; 2510 2511 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2512 max_txqs); 2513 if (status) { 2514 dev_err(&pf->pdev->dev, 2515 "VSI %d failed lan queue config, error %d\n", 2516 vsi->vsi_num, status); 2517 goto unroll_vector_base; 2518 } 2519 2520 /* Add switch rule to drop all Tx Flow Control Frames, of look up 2521 * type ETHERTYPE from VSIs, and restrict malicious VF from sending 2522 * out PAUSE or PFC frames. If enabled, FW can still send FC frames. 2523 * The rule is added once for PF VSI in order to create appropriate 2524 * recipe, since VSI/VSI list is ignored with drop action... 2525 * Also add rules to handle LLDP Tx and Rx packets. Tx LLDP packets 2526 * need to be dropped so that VFs cannot send LLDP packets to reconfig 2527 * DCB settings in the HW. Also, if the FW DCBX engine is not running 2528 * then Rx LLDP packets need to be redirected up the stack. 2529 */ 2530 if (vsi->type == ICE_VSI_PF) { 2531 ice_vsi_add_rem_eth_mac(vsi, true); 2532 2533 /* Tx LLDP packets */ 2534 ice_cfg_sw_lldp(vsi, true, true); 2535 2536 /* Rx LLDP packets */ 2537 if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) 2538 ice_cfg_sw_lldp(vsi, false, true); 2539 } 2540 2541 return vsi; 2542 2543 unroll_vector_base: 2544 /* reclaim SW interrupts back to the common pool */ 2545 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); 2546 pf->num_avail_sw_msix += vsi->num_q_vectors; 2547 unroll_alloc_q_vector: 2548 ice_vsi_free_q_vectors(vsi); 2549 unroll_vsi_init: 2550 ice_vsi_delete(vsi); 2551 unroll_get_qs: 2552 ice_vsi_put_qs(vsi); 2553 pf->q_left_tx += vsi->alloc_txq; 2554 pf->q_left_rx += vsi->alloc_rxq; 2555 ice_vsi_clear(vsi); 2556 2557 return NULL; 2558 } 2559 2560 /** 2561 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW 2562 * @vsi: the VSI being cleaned up 2563 */ 2564 static void ice_vsi_release_msix(struct ice_vsi *vsi) 2565 { 2566 struct ice_pf *pf = vsi->back; 2567 struct ice_hw *hw = &pf->hw; 2568 u32 txq = 0; 2569 u32 rxq = 0; 2570 int i, q; 2571 2572 for (i = 0; i < vsi->num_q_vectors; i++) { 2573 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2574 u16 reg_idx = q_vector->reg_idx; 2575 2576 wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0); 2577 wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0); 2578 for (q = 0; q < q_vector->num_ring_tx; q++) { 2579 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); 2580 txq++; 2581 } 2582 2583 for (q = 0; q < q_vector->num_ring_rx; q++) { 2584 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); 2585 rxq++; 2586 } 2587 } 2588 2589 ice_flush(hw); 2590 } 2591 2592 /** 2593 * ice_vsi_free_irq - Free the IRQ association with the OS 2594 * @vsi: the VSI being configured 2595 */ 2596 void ice_vsi_free_irq(struct ice_vsi *vsi) 2597 { 2598 struct ice_pf *pf = vsi->back; 2599 int base = vsi->base_vector; 2600 int i; 2601 2602 if (!vsi->q_vectors || !vsi->irqs_ready) 2603 return; 2604 2605 ice_vsi_release_msix(vsi); 2606 if (vsi->type == ICE_VSI_VF) 2607 return; 2608 2609 vsi->irqs_ready = false; 2610 ice_for_each_q_vector(vsi, i) { 2611 u16 vector = i + base; 2612 int irq_num; 2613 2614 irq_num = pf->msix_entries[vector].vector; 2615 2616 /* free only the irqs that were actually requested */ 2617 if (!vsi->q_vectors[i] || 2618 !(vsi->q_vectors[i]->num_ring_tx || 2619 vsi->q_vectors[i]->num_ring_rx)) 2620 continue; 2621 2622 /* clear the affinity notifier in the IRQ descriptor */ 2623 irq_set_affinity_notifier(irq_num, NULL); 2624 2625 /* clear the affinity_mask in the IRQ descriptor */ 2626 irq_set_affinity_hint(irq_num, NULL); 2627 synchronize_irq(irq_num); 2628 devm_free_irq(&pf->pdev->dev, irq_num, 2629 vsi->q_vectors[i]); 2630 } 2631 } 2632 2633 /** 2634 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues 2635 * @vsi: the VSI having resources freed 2636 */ 2637 void ice_vsi_free_tx_rings(struct ice_vsi *vsi) 2638 { 2639 int i; 2640 2641 if (!vsi->tx_rings) 2642 return; 2643 2644 ice_for_each_txq(vsi, i) 2645 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 2646 ice_free_tx_ring(vsi->tx_rings[i]); 2647 } 2648 2649 /** 2650 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues 2651 * @vsi: the VSI having resources freed 2652 */ 2653 void ice_vsi_free_rx_rings(struct ice_vsi *vsi) 2654 { 2655 int i; 2656 2657 if (!vsi->rx_rings) 2658 return; 2659 2660 ice_for_each_rxq(vsi, i) 2661 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) 2662 ice_free_rx_ring(vsi->rx_rings[i]); 2663 } 2664 2665 /** 2666 * ice_vsi_close - Shut down a VSI 2667 * @vsi: the VSI being shut down 2668 */ 2669 void ice_vsi_close(struct ice_vsi *vsi) 2670 { 2671 if (!test_and_set_bit(__ICE_DOWN, vsi->state)) 2672 ice_down(vsi); 2673 2674 ice_vsi_free_irq(vsi); 2675 ice_vsi_free_tx_rings(vsi); 2676 ice_vsi_free_rx_rings(vsi); 2677 } 2678 2679 /** 2680 * ice_free_res - free a block of resources 2681 * @res: pointer to the resource 2682 * @index: starting index previously returned by ice_get_res 2683 * @id: identifier to track owner 2684 * 2685 * Returns number of resources freed 2686 */ 2687 int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) 2688 { 2689 int count = 0; 2690 int i; 2691 2692 if (!res || index >= res->end) 2693 return -EINVAL; 2694 2695 id |= ICE_RES_VALID_BIT; 2696 for (i = index; i < res->end && res->list[i] == id; i++) { 2697 res->list[i] = 0; 2698 count++; 2699 } 2700 2701 return count; 2702 } 2703 2704 /** 2705 * ice_search_res - Search the tracker for a block of resources 2706 * @res: pointer to the resource 2707 * @needed: size of the block needed 2708 * @id: identifier to track owner 2709 * 2710 * Returns the base item index of the block, or -ENOMEM for error 2711 */ 2712 static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) 2713 { 2714 int start = 0, end = 0; 2715 2716 if (needed > res->end) 2717 return -ENOMEM; 2718 2719 id |= ICE_RES_VALID_BIT; 2720 2721 do { 2722 /* skip already allocated entries */ 2723 if (res->list[end++] & ICE_RES_VALID_BIT) { 2724 start = end; 2725 if ((start + needed) > res->end) 2726 break; 2727 } 2728 2729 if (end == (start + needed)) { 2730 int i = start; 2731 2732 /* there was enough, so assign it to the requestor */ 2733 while (i != end) 2734 res->list[i++] = id; 2735 2736 return start; 2737 } 2738 } while (end < res->end); 2739 2740 return -ENOMEM; 2741 } 2742 2743 /** 2744 * ice_get_res - get a block of resources 2745 * @pf: board private structure 2746 * @res: pointer to the resource 2747 * @needed: size of the block needed 2748 * @id: identifier to track owner 2749 * 2750 * Returns the base item index of the block, or negative for error 2751 */ 2752 int 2753 ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) 2754 { 2755 if (!res || !pf) 2756 return -EINVAL; 2757 2758 if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { 2759 dev_err(&pf->pdev->dev, 2760 "param err: needed=%d, num_entries = %d id=0x%04x\n", 2761 needed, res->num_entries, id); 2762 return -EINVAL; 2763 } 2764 2765 return ice_search_res(res, needed, id); 2766 } 2767 2768 /** 2769 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI 2770 * @vsi: the VSI being un-configured 2771 */ 2772 void ice_vsi_dis_irq(struct ice_vsi *vsi) 2773 { 2774 int base = vsi->base_vector; 2775 struct ice_pf *pf = vsi->back; 2776 struct ice_hw *hw = &pf->hw; 2777 u32 val; 2778 int i; 2779 2780 /* disable interrupt causation from each queue */ 2781 if (vsi->tx_rings) { 2782 ice_for_each_txq(vsi, i) { 2783 if (vsi->tx_rings[i]) { 2784 u16 reg; 2785 2786 reg = vsi->tx_rings[i]->reg_idx; 2787 val = rd32(hw, QINT_TQCTL(reg)); 2788 val &= ~QINT_TQCTL_CAUSE_ENA_M; 2789 wr32(hw, QINT_TQCTL(reg), val); 2790 } 2791 } 2792 } 2793 2794 if (vsi->rx_rings) { 2795 ice_for_each_rxq(vsi, i) { 2796 if (vsi->rx_rings[i]) { 2797 u16 reg; 2798 2799 reg = vsi->rx_rings[i]->reg_idx; 2800 val = rd32(hw, QINT_RQCTL(reg)); 2801 val &= ~QINT_RQCTL_CAUSE_ENA_M; 2802 wr32(hw, QINT_RQCTL(reg), val); 2803 } 2804 } 2805 } 2806 2807 /* disable each interrupt */ 2808 ice_for_each_q_vector(vsi, i) 2809 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); 2810 2811 ice_flush(hw); 2812 2813 ice_for_each_q_vector(vsi, i) 2814 synchronize_irq(pf->msix_entries[i + base].vector); 2815 } 2816 2817 /** 2818 * ice_napi_del - Remove NAPI handler for the VSI 2819 * @vsi: VSI for which NAPI handler is to be removed 2820 */ 2821 void ice_napi_del(struct ice_vsi *vsi) 2822 { 2823 int v_idx; 2824 2825 if (!vsi->netdev) 2826 return; 2827 2828 ice_for_each_q_vector(vsi, v_idx) 2829 netif_napi_del(&vsi->q_vectors[v_idx]->napi); 2830 } 2831 2832 /** 2833 * ice_vsi_release - Delete a VSI and free its resources 2834 * @vsi: the VSI being removed 2835 * 2836 * Returns 0 on success or < 0 on error 2837 */ 2838 int ice_vsi_release(struct ice_vsi *vsi) 2839 { 2840 struct ice_pf *pf; 2841 2842 if (!vsi->back) 2843 return -ENODEV; 2844 pf = vsi->back; 2845 2846 /* do not unregister while driver is in the reset recovery pending 2847 * state. Since reset/rebuild happens through PF service task workqueue, 2848 * it's not a good idea to unregister netdev that is associated to the 2849 * PF that is running the work queue items currently. This is done to 2850 * avoid check_flush_dependency() warning on this wq 2851 */ 2852 if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) 2853 unregister_netdev(vsi->netdev); 2854 2855 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2856 ice_rss_clean(vsi); 2857 2858 /* Disable VSI and free resources */ 2859 if (vsi->type != ICE_VSI_LB) 2860 ice_vsi_dis_irq(vsi); 2861 ice_vsi_close(vsi); 2862 2863 /* SR-IOV determines needed MSIX resources all at once instead of per 2864 * VSI since when VFs are spawned we know how many VFs there are and how 2865 * many interrupts each VF needs. SR-IOV MSIX resources are also 2866 * cleared in the same manner. 2867 */ 2868 if (vsi->type != ICE_VSI_VF) { 2869 /* reclaim SW interrupts back to the common pool */ 2870 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); 2871 pf->num_avail_sw_msix += vsi->num_q_vectors; 2872 } 2873 2874 if (vsi->type == ICE_VSI_PF) { 2875 ice_vsi_add_rem_eth_mac(vsi, false); 2876 ice_cfg_sw_lldp(vsi, true, false); 2877 /* The Rx rule will only exist to remove if the LLDP FW 2878 * engine is currently stopped 2879 */ 2880 if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) 2881 ice_cfg_sw_lldp(vsi, false, false); 2882 } 2883 2884 ice_remove_vsi_fltr(&pf->hw, vsi->idx); 2885 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); 2886 ice_vsi_delete(vsi); 2887 ice_vsi_free_q_vectors(vsi); 2888 2889 /* make sure unregister_netdev() was called by checking __ICE_DOWN */ 2890 if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) { 2891 free_netdev(vsi->netdev); 2892 vsi->netdev = NULL; 2893 } 2894 2895 ice_vsi_clear_rings(vsi); 2896 2897 ice_vsi_put_qs(vsi); 2898 pf->q_left_tx += vsi->alloc_txq; 2899 pf->q_left_rx += vsi->alloc_rxq; 2900 2901 /* retain SW VSI data structure since it is needed to unregister and 2902 * free VSI netdev when PF is not in reset recovery pending state,\ 2903 * for ex: during rmmod. 2904 */ 2905 if (!ice_is_reset_in_progress(pf->state)) 2906 ice_vsi_clear(vsi); 2907 2908 return 0; 2909 } 2910 2911 /** 2912 * ice_vsi_rebuild - Rebuild VSI after reset 2913 * @vsi: VSI to be rebuild 2914 * 2915 * Returns 0 on success and negative value on failure 2916 */ 2917 int ice_vsi_rebuild(struct ice_vsi *vsi) 2918 { 2919 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2920 struct ice_vf *vf = NULL; 2921 enum ice_status status; 2922 struct ice_pf *pf; 2923 int ret, i; 2924 2925 if (!vsi) 2926 return -EINVAL; 2927 2928 pf = vsi->back; 2929 if (vsi->type == ICE_VSI_VF) 2930 vf = &pf->vf[vsi->vf_id]; 2931 2932 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); 2933 ice_vsi_free_q_vectors(vsi); 2934 2935 /* SR-IOV determines needed MSIX resources all at once instead of per 2936 * VSI since when VFs are spawned we know how many VFs there are and how 2937 * many interrupts each VF needs. SR-IOV MSIX resources are also 2938 * cleared in the same manner. 2939 */ 2940 if (vsi->type != ICE_VSI_VF) { 2941 /* reclaim SW interrupts back to the common pool */ 2942 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); 2943 pf->num_avail_sw_msix += vsi->num_q_vectors; 2944 vsi->base_vector = 0; 2945 } 2946 2947 ice_vsi_clear_rings(vsi); 2948 ice_vsi_free_arrays(vsi); 2949 ice_dev_onetime_setup(&pf->hw); 2950 if (vsi->type == ICE_VSI_VF) 2951 ice_vsi_set_num_qs(vsi, vf->vf_id); 2952 else 2953 ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); 2954 ice_vsi_set_tc_cfg(vsi); 2955 2956 /* Initialize VSI struct elements and create VSI in FW */ 2957 ret = ice_vsi_init(vsi); 2958 if (ret < 0) 2959 goto err_vsi; 2960 2961 ret = ice_vsi_alloc_arrays(vsi); 2962 if (ret < 0) 2963 goto err_vsi; 2964 2965 switch (vsi->type) { 2966 case ICE_VSI_PF: 2967 ret = ice_vsi_alloc_q_vectors(vsi); 2968 if (ret) 2969 goto err_rings; 2970 2971 ret = ice_vsi_setup_vector_base(vsi); 2972 if (ret) 2973 goto err_vectors; 2974 2975 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 2976 if (ret) 2977 goto err_vectors; 2978 2979 ret = ice_vsi_alloc_rings(vsi); 2980 if (ret) 2981 goto err_vectors; 2982 2983 ice_vsi_map_rings_to_vectors(vsi); 2984 /* Do not exit if configuring RSS had an issue, at least 2985 * receive traffic on first queue. Hence no need to capture 2986 * return value 2987 */ 2988 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2989 ice_vsi_cfg_rss_lut_key(vsi); 2990 break; 2991 case ICE_VSI_VF: 2992 ret = ice_vsi_alloc_q_vectors(vsi); 2993 if (ret) 2994 goto err_rings; 2995 2996 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 2997 if (ret) 2998 goto err_vectors; 2999 3000 ret = ice_vsi_alloc_rings(vsi); 3001 if (ret) 3002 goto err_vectors; 3003 3004 pf->q_left_tx -= vsi->alloc_txq; 3005 pf->q_left_rx -= vsi->alloc_rxq; 3006 break; 3007 default: 3008 break; 3009 } 3010 3011 /* configure VSI nodes based on number of queues and TC's */ 3012 for (i = 0; i < vsi->tc_cfg.numtc; i++) 3013 max_txqs[i] = vsi->alloc_txq; 3014 3015 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 3016 max_txqs); 3017 if (status) { 3018 dev_err(&pf->pdev->dev, 3019 "VSI %d failed lan queue config, error %d\n", 3020 vsi->vsi_num, status); 3021 goto err_vectors; 3022 } 3023 return 0; 3024 3025 err_vectors: 3026 ice_vsi_free_q_vectors(vsi); 3027 err_rings: 3028 if (vsi->netdev) { 3029 vsi->current_netdev_flags = 0; 3030 unregister_netdev(vsi->netdev); 3031 free_netdev(vsi->netdev); 3032 vsi->netdev = NULL; 3033 } 3034 err_vsi: 3035 ice_vsi_clear(vsi); 3036 set_bit(__ICE_RESET_FAILED, pf->state); 3037 return ret; 3038 } 3039 3040 /** 3041 * ice_is_reset_in_progress - check for a reset in progress 3042 * @state: PF state field 3043 */ 3044 bool ice_is_reset_in_progress(unsigned long *state) 3045 { 3046 return test_bit(__ICE_RESET_OICR_RECV, state) || 3047 test_bit(__ICE_PFR_REQ, state) || 3048 test_bit(__ICE_CORER_REQ, state) || 3049 test_bit(__ICE_GLOBR_REQ, state); 3050 } 3051 3052 #ifdef CONFIG_DCB 3053 /** 3054 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map 3055 * @vsi: VSI being configured 3056 * @ctx: the context buffer returned from AQ VSI update command 3057 */ 3058 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) 3059 { 3060 vsi->info.mapping_flags = ctx->info.mapping_flags; 3061 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, 3062 sizeof(vsi->info.q_mapping)); 3063 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, 3064 sizeof(vsi->info.tc_mapping)); 3065 } 3066 3067 /** 3068 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration 3069 * @vsi: the VSI being configured 3070 * @ena_tc: TC map to be enabled 3071 */ 3072 static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) 3073 { 3074 struct net_device *netdev = vsi->netdev; 3075 struct ice_pf *pf = vsi->back; 3076 struct ice_dcbx_cfg *dcbcfg; 3077 u8 netdev_tc; 3078 int i; 3079 3080 if (!netdev) 3081 return; 3082 3083 if (!ena_tc) { 3084 netdev_reset_tc(netdev); 3085 return; 3086 } 3087 3088 if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc)) 3089 return; 3090 3091 dcbcfg = &pf->hw.port_info->local_dcbx_cfg; 3092 3093 ice_for_each_traffic_class(i) 3094 if (vsi->tc_cfg.ena_tc & BIT(i)) 3095 netdev_set_tc_queue(netdev, 3096 vsi->tc_cfg.tc_info[i].netdev_tc, 3097 vsi->tc_cfg.tc_info[i].qcount_tx, 3098 vsi->tc_cfg.tc_info[i].qoffset); 3099 3100 for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { 3101 u8 ets_tc = dcbcfg->etscfg.prio_table[i]; 3102 3103 /* Get the mapped netdev TC# for the UP */ 3104 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; 3105 netdev_set_prio_tc_map(netdev, i, netdev_tc); 3106 } 3107 } 3108 3109 /** 3110 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map 3111 * @vsi: VSI to be configured 3112 * @ena_tc: TC bitmap 3113 * 3114 * VSI queues expected to be quiesced before calling this function 3115 */ 3116 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) 3117 { 3118 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 3119 struct ice_vsi_ctx *ctx; 3120 struct ice_pf *pf = vsi->back; 3121 enum ice_status status; 3122 int i, ret = 0; 3123 u8 num_tc = 0; 3124 3125 ice_for_each_traffic_class(i) { 3126 /* build bitmap of enabled TCs */ 3127 if (ena_tc & BIT(i)) 3128 num_tc++; 3129 /* populate max_txqs per TC */ 3130 max_txqs[i] = vsi->alloc_txq; 3131 } 3132 3133 vsi->tc_cfg.ena_tc = ena_tc; 3134 vsi->tc_cfg.numtc = num_tc; 3135 3136 ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL); 3137 if (!ctx) 3138 return -ENOMEM; 3139 3140 ctx->vf_num = 0; 3141 ctx->info = vsi->info; 3142 3143 ice_vsi_setup_q_map(vsi, ctx); 3144 3145 /* must to indicate which section of VSI context are being modified */ 3146 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); 3147 status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); 3148 if (status) { 3149 dev_info(&pf->pdev->dev, "Failed VSI Update\n"); 3150 ret = -EIO; 3151 goto out; 3152 } 3153 3154 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 3155 max_txqs); 3156 3157 if (status) { 3158 dev_err(&pf->pdev->dev, 3159 "VSI %d failed TC config, error %d\n", 3160 vsi->vsi_num, status); 3161 ret = -EIO; 3162 goto out; 3163 } 3164 ice_vsi_update_q_map(vsi, ctx); 3165 vsi->info.valid_sections = 0; 3166 3167 ice_vsi_cfg_netdev_tc(vsi, ena_tc); 3168 out: 3169 devm_kfree(&pf->pdev->dev, ctx); 3170 return ret; 3171 } 3172 #endif /* CONFIG_DCB */ 3173