// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_devlink.h"

/**
 * ice_vsi_type_str - maps VSI type enum to string equivalents
 * @vsi_type: VSI type enum
 */
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
{
	switch (vsi_type) {
	case ICE_VSI_PF:
		return "ICE_VSI_PF";
	case ICE_VSI_VF:
		return "ICE_VSI_VF";
	case ICE_VSI_CTRL:
		return "ICE_VSI_CTRL";
	case ICE_VSI_LB:
		return "ICE_VSI_LB";
	case ICE_VSI_SWITCHDEV_CTRL:
		return "ICE_VSI_SWITCHDEV_CTRL";
	default:
		return "unknown";
	}
}

/**
 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 *
 * First enable/disable all of the Rx rings, flush any remaining writes, and
 * then verify that they have all been enabled/disabled successfully. This will
 * let all of the register writes complete when enabling/disabling the Rx rings
 * before waiting for the change in hardware to complete.
 */
static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
	int ret = 0;
	u16 i;

	for (i = 0; i < vsi->num_rxq; i++)
		ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);

	ice_flush(&vsi->back->hw);

	for (i = 0; i < vsi->num_rxq; i++) {
		ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
		if (ret)
			break;
	}

	return ret;
}
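
/* Editorial sketch (not part of the driver): the batch-then-verify idiom
 * above, restated with hypothetical helpers. Posting all ring changes and
 * flushing once lets the hardware act on every ring in parallel instead of
 * serializing a write-then-poll cycle per ring:
 *
 *	for (i = 0; i < n; i++)
 *		post_ring_change(i);		// posted write, no wait
 *	flush_posted_writes();			// one flush for all rings
 *	for (i = 0; i < n; i++)
 *		if (wait_ring_change(i))	// now poll each ring
 *			return -EBUSY;
 */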

/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
				     sizeof(*vsi->tx_rings), GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;

	vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
				     sizeof(*vsi->rx_rings), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rings;

	/* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
	vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq),
				    sizeof(*vsi->txq_map), GFP_KERNEL);
	if (!vsi->txq_map)
		goto err_txq_map;

	vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
				    sizeof(*vsi->rxq_map), GFP_KERNEL);
	if (!vsi->rxq_map)
		goto err_rxq_map;

	/* There is no need to allocate q_vectors for a loopback VSI. */
	if (vsi->type == ICE_VSI_LB)
		return 0;

	/* allocate memory for q_vector pointers */
	vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
				      sizeof(*vsi->q_vectors), GFP_KERNEL);
	if (!vsi->q_vectors)
		goto err_vectors;

	vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq),
					   GFP_KERNEL);
	if (!vsi->af_xdp_zc_qps)
		goto err_zc_qps;

	return 0;

err_zc_qps:
	devm_kfree(dev, vsi->q_vectors);
err_vectors:
	devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
	devm_kfree(dev, vsi->txq_map);
err_txq_map:
	devm_kfree(dev, vsi->rx_rings);
err_rings:
	devm_kfree(dev, vsi->tx_rings);
	return -ENOMEM;
}
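
/* Editorial note: the error labels above unwind in reverse allocation
 * order; each label frees one allocation and falls through to the next,
 * so "goto err_vectors" after a failed q_vectors allocation releases
 * rxq_map, txq_map, rx_rings and tx_rings in turn. Any new allocation
 * added to this function needs a matching label at the right point in
 * that chain.
 */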

/**
 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
	switch (vsi->type) {
	case ICE_VSI_PF:
	case ICE_VSI_SWITCHDEV_CTRL:
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
		/* a user could change the values of num_[tr]x_desc using
		 * ethtool -G so we should keep those values instead of
		 * overwriting them with the defaults.
		 */
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
		break;
	default:
		dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
			vsi->type);
		break;
	}
}

/**
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 * @vf_id: ID of the VF being configured
 */
static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vf *vf = NULL;

	if (vsi->type == ICE_VSI_VF)
		vsi->vf_id = vf_id;
	else
		vsi->vf_id = ICE_INVAL_VFID;

	switch (vsi->type) {
	case ICE_VSI_PF:
		if (vsi->req_txq) {
			vsi->alloc_txq = vsi->req_txq;
			vsi->num_txq = vsi->req_txq;
		} else {
			vsi->alloc_txq = min3(pf->num_lan_msix,
					      ice_get_avail_txq_count(pf),
					      (u16)num_online_cpus());
		}

		pf->num_lan_tx = vsi->alloc_txq;

		/* only 1 Rx queue unless RSS is enabled */
		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			vsi->alloc_rxq = 1;
		} else {
			if (vsi->req_rxq) {
				vsi->alloc_rxq = vsi->req_rxq;
				vsi->num_rxq = vsi->req_rxq;
			} else {
				vsi->alloc_rxq = min3(pf->num_lan_msix,
						      ice_get_avail_rxq_count(pf),
						      (u16)num_online_cpus());
			}
		}

		pf->num_lan_rx = vsi->alloc_rxq;

		vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
					   max_t(int, vsi->alloc_rxq,
						 vsi->alloc_txq));
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
		/* The number of queues for ctrl VSI is equal to number of VFs.
		 * Each ring is associated to the corresponding VF_PR netdev.
		 */
		vsi->alloc_txq = pf->num_alloc_vfs;
		vsi->alloc_rxq = pf->num_alloc_vfs;
		vsi->num_q_vectors = 1;
		break;
	case ICE_VSI_VF:
		vf = &pf->vf[vsi->vf_id];
		if (vf->num_req_qs)
			vf->num_vf_qs = vf->num_req_qs;
		vsi->alloc_txq = vf->num_vf_qs;
		vsi->alloc_rxq = vf->num_vf_qs;
		/* pf->num_msix_per_vf includes (VF miscellaneous vector +
		 * data queue interrupts). Since vsi->num_q_vectors is the
		 * number of queue vectors, subtract 1 (ICE_NONQ_VECS_VF)
		 * from the original vector count.
		 */
		vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
		break;
	case ICE_VSI_CTRL:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		vsi->num_q_vectors = 1;
		break;
	case ICE_VSI_LB:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		break;
	default:
		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type);
		break;
	}

	ice_vsi_set_num_desc(vsi);
}
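
/* Worked example (editorial, hypothetical numbers): for a PF VSI with
 * pf->num_lan_msix = 64, ice_get_avail_txq_count() = 128 and 16 online
 * CPUs, the min3() above yields alloc_txq = 16; with RSS enabled and the
 * same limits alloc_rxq = 16, so num_q_vectors = min(64, max(16, 16)) = 16,
 * i.e. one MSI-X vector per queue pair.
 */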
"vsi does not exist at pf->vsi[%d]\n", vsi->idx); 364 return -EINVAL; 365 } 366 367 mutex_lock(&pf->sw_mutex); 368 /* updates the PF for this cleared VSI */ 369 370 pf->vsi[vsi->idx] = NULL; 371 if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL) 372 pf->next_vsi = vsi->idx; 373 if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && 374 vsi->vf_id != ICE_INVAL_VFID) 375 pf->next_vsi = vsi->idx; 376 377 ice_vsi_free_arrays(vsi); 378 mutex_unlock(&pf->sw_mutex); 379 devm_kfree(dev, vsi); 380 381 return 0; 382 } 383 384 /** 385 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI 386 * @irq: interrupt number 387 * @data: pointer to a q_vector 388 */ 389 static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data) 390 { 391 struct ice_q_vector *q_vector = (struct ice_q_vector *)data; 392 393 if (!q_vector->tx.ring) 394 return IRQ_HANDLED; 395 396 #define FDIR_RX_DESC_CLEAN_BUDGET 64 397 ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET); 398 ice_clean_ctrl_tx_irq(q_vector->tx.ring); 399 400 return IRQ_HANDLED; 401 } 402 403 /** 404 * ice_msix_clean_rings - MSIX mode Interrupt Handler 405 * @irq: interrupt number 406 * @data: pointer to a q_vector 407 */ 408 static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) 409 { 410 struct ice_q_vector *q_vector = (struct ice_q_vector *)data; 411 412 if (!q_vector->tx.ring && !q_vector->rx.ring) 413 return IRQ_HANDLED; 414 415 q_vector->total_events++; 416 417 napi_schedule(&q_vector->napi); 418 419 return IRQ_HANDLED; 420 } 421 422 static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data) 423 { 424 struct ice_q_vector *q_vector = (struct ice_q_vector *)data; 425 struct ice_pf *pf = q_vector->vsi->back; 426 int i; 427 428 if (!q_vector->tx.ring && !q_vector->rx.ring) 429 return IRQ_HANDLED; 430 431 ice_for_each_vf(pf, i) 432 napi_schedule(&pf->vf[i].repr->q_vector->napi); 433 434 return IRQ_HANDLED; 435 } 436 437 /** 438 * ice_vsi_alloc - Allocates the next available struct VSI in the PF 439 * @pf: board private structure 440 * @vsi_type: type of VSI 441 * @vf_id: ID of the VF being configured 442 * 443 * returns a pointer to a VSI on success, NULL on failure. 444 */ 445 static struct ice_vsi * 446 ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id) 447 { 448 struct device *dev = ice_pf_to_dev(pf); 449 struct ice_vsi *vsi = NULL; 450 451 /* Need to protect the allocation of the VSIs at the PF level */ 452 mutex_lock(&pf->sw_mutex); 453 454 /* If we have already allocated our maximum number of VSIs, 455 * pf->next_vsi will be ICE_NO_VSI. 

/**
 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
 * @vsi: pointer to VSI being cleared
 */
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	if (vsi->af_xdp_zc_qps) {
		bitmap_free(vsi->af_xdp_zc_qps);
		vsi->af_xdp_zc_qps = NULL;
	}
	/* free the ring and vector containers */
	if (vsi->q_vectors) {
		devm_kfree(dev, vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	if (vsi->tx_rings) {
		devm_kfree(dev, vsi->tx_rings);
		vsi->tx_rings = NULL;
	}
	if (vsi->rx_rings) {
		devm_kfree(dev, vsi->rx_rings);
		vsi->rx_rings = NULL;
	}
	if (vsi->txq_map) {
		devm_kfree(dev, vsi->txq_map);
		vsi->txq_map = NULL;
	}
	if (vsi->rxq_map) {
		devm_kfree(dev, vsi->rxq_map);
		vsi->rxq_map = NULL;
	}
}

/**
 * ice_vsi_clear - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
 *
 * Returns 0 on success, negative on failure
 */
static int ice_vsi_clear(struct ice_vsi *vsi)
{
	struct ice_pf *pf = NULL;
	struct device *dev;

	if (!vsi)
		return 0;

	if (!vsi->back)
		return -EINVAL;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
		return -EINVAL;
	}

	mutex_lock(&pf->sw_mutex);
	/* updates the PF for this cleared VSI */

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
		pf->next_vsi = vsi->idx;
	if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL &&
	    vsi->vf_id != ICE_INVAL_VFID)
		pf->next_vsi = vsi->idx;

	ice_vsi_free_arrays(vsi);
	mutex_unlock(&pf->sw_mutex);
	devm_kfree(dev, vsi);

	return 0;
}

/**
 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.ring)
		return IRQ_HANDLED;

#define FDIR_RX_DESC_CLEAN_BUDGET 64
	ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET);
	ice_clean_ctrl_tx_irq(q_vector->tx.ring);

	return IRQ_HANDLED;
}

/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	q_vector->total_events++;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
	struct ice_pf *pf = q_vector->vsi->back;
	int i;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	ice_for_each_vf(pf, i)
		napi_schedule(&pf->vf[i].repr->q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 * @vsi_type: type of VSI
 * @vf_id: ID of the VF being configured
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi = NULL;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->sw_mutex);

	/* If we have already allocated our maximum number of VSIs,
	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	 * is available to be populated
	 */
	if (pf->next_vsi == ICE_NO_VSI) {
		dev_dbg(dev, "out of VSI slots!\n");
		goto unlock_pf;
	}

	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
	if (!vsi)
		goto unlock_pf;

	vsi->type = vsi_type;
	vsi->back = pf;
	set_bit(ICE_VSI_DOWN, vsi->state);

	if (vsi_type == ICE_VSI_VF)
		ice_vsi_set_num_qs(vsi, vf_id);
	else
		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

	switch (vsi->type) {
	case ICE_VSI_SWITCHDEV_CTRL:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;

		/* Setup eswitch MSIX irq handler for VSI */
		vsi->irq_handler = ice_eswitch_msix_clean_rings;
		break;
	case ICE_VSI_PF:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;

		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	case ICE_VSI_CTRL:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;

		/* Setup ctrl VSI MSIX irq handler */
		vsi->irq_handler = ice_msix_clean_ctrl_vsi;
		break;
	case ICE_VSI_VF:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;
		break;
	case ICE_VSI_LB:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;
		break;
	default:
		dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
		goto unlock_pf;
	}

	if (vsi->type == ICE_VSI_CTRL && vf_id == ICE_INVAL_VFID) {
		/* Use the last VSI slot as the index for PF control VSI */
		vsi->idx = pf->num_alloc_vsi - 1;
		pf->ctrl_vsi_idx = vsi->idx;
		pf->vsi[vsi->idx] = vsi;
	} else {
		/* fill slot and make note of the index */
		vsi->idx = pf->next_vsi;
		pf->vsi[pf->next_vsi] = vsi;

		/* prepare pf->next_vsi for next use */
		pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
						 pf->next_vsi);
	}

	if (vsi->type == ICE_VSI_CTRL && vf_id != ICE_INVAL_VFID)
		pf->vf[vf_id].ctrl_vsi_idx = vsi->idx;
	goto unlock_pf;

err_rings:
	devm_kfree(dev, vsi);
	vsi = NULL;
unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;
}
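
/* Editorial sketch: a caller bringing up a new VSI (as the driver's VSI
 * setup path does) pairs ice_vsi_alloc() with ice_vsi_clear() on any later
 * failure, e.g.:
 *
 *	vsi = ice_vsi_alloc(pf, ICE_VSI_PF, ICE_INVAL_VFID);
 *	if (!vsi)
 *		return NULL;
 *	if (ice_vsi_get_qs(vsi)) {
 *		ice_vsi_clear(vsi);
 *		return NULL;
 *	}
 */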

/**
 * ice_alloc_fd_res - Allocate FD resource for a VSI
 * @vsi: pointer to the ice_vsi
 *
 * This allocates the FD resources
 *
 * Returns 0 on success, -EPERM on no-op or -EIO on failure
 */
static int ice_alloc_fd_res(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u32 g_val, b_val;

	/* Flow Director filters are only allocated/assigned to the PF VSI which
	 * passes the traffic. The CTRL VSI is only used to add/delete filters
	 * so we don't allocate resources to it
	 */

	/* FD filters from guaranteed pool per VSI */
	g_val = pf->hw.func_caps.fd_fltr_guar;
	if (!g_val)
		return -EPERM;

	/* FD filters from best effort pool */
	b_val = pf->hw.func_caps.fd_fltr_best_effort;
	if (!b_val)
		return -EPERM;

	if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF))
		return -EPERM;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EPERM;

	vsi->num_gfltr = g_val / pf->num_alloc_vsi;

	/* each VSI gets same "best_effort" quota */
	vsi->num_bfltr = b_val;

	if (vsi->type == ICE_VSI_VF) {
		vsi->num_gfltr = 0;

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	}

	return 0;
}

/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	struct ice_qs_cfg rx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_rxqs,
		.pf_map_size = pf->max_pf_rxqs,
		.q_count = vsi->alloc_rxq,
		.scatter_count = ICE_MAX_SCATTER_RXQS,
		.vsi_map = vsi->rxq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	int ret;

	ret = __ice_vsi_get_qs(&tx_qs_cfg);
	if (ret)
		return ret;
	vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;

	ret = __ice_vsi_get_qs(&rx_qs_cfg);
	if (ret)
		return ret;
	vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;

	return 0;
}

/**
 * ice_vsi_put_qs - Release queues from VSI to PF
 * @vsi: the VSI that is going to release queues
 */
static void ice_vsi_put_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	mutex_lock(&pf->avail_q_mutex);

	for (i = 0; i < vsi->alloc_txq; i++) {
		clear_bit(vsi->txq_map[i], pf->avail_txqs);
		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
	}

	for (i = 0; i < vsi->alloc_rxq; i++) {
		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
	}

	mutex_unlock(&pf->avail_q_mutex);
}

/**
 * ice_is_safe_mode
 * @pf: pointer to the PF struct
 *
 * returns true if driver is in safe mode, false otherwise
 */
bool ice_is_safe_mode(struct ice_pf *pf)
{
	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}

/**
 * ice_is_aux_ena
 * @pf: pointer to the PF struct
 *
 * returns true if AUX devices/drivers are supported, false otherwise
 */
bool ice_is_aux_ena(struct ice_pf *pf)
{
	return test_bit(ICE_FLAG_AUX_ENA, pf->flags);
}

/**
 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
 * @vsi: the VSI being cleaned up
 *
 * This function deletes RSS input set for all flows that were configured
 * for this VSI
 */
static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	enum ice_status status;

	if (ice_is_safe_mode(pf))
		return;

	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n",
			vsi->vsi_num, ice_stat_str(status));
}

/**
 * ice_rss_clean - Delete RSS related VSI structures and configuration
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	if (vsi->rss_hkey_user)
		devm_kfree(dev, vsi->rss_hkey_user);
	if (vsi->rss_lut_user)
		devm_kfree(dev, vsi->rss_lut_user);

	ice_vsi_clean_rss_flow_fld(vsi);
	/* remove RSS replay list */
	if (!ice_is_safe_mode(pf))
		ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = (u16)cap->rss_table_size;
		vsi->rss_size = min_t(u16, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
		vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
		vsi->rss_size = min_t(u16, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
		break;
	case ICE_VSI_VF:
		/* VF VSI will get a small RSS table.
		 * For VSI_LUT, LUT size should be set to 64 bytes.
		 */
		vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
		vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
		break;
	case ICE_VSI_LB:
		break;
	default:
		dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		break;
	}
}

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSI's should be allocated from shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
	 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
	 * packets untagged/tagged.
	 */
	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
				  ICE_AQ_VSI_VLAN_MODE_M) >>
				 ICE_AQ_VSI_VLAN_MODE_S);
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No Outer tag support, so outer_tag_flags remains zero */
}
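
/* Editorial note: with the 1:1 mapping above, every user-priority field in
 * the 32-bit table word holds its own index, so the ingress, egress and
 * outer-to-inner tables all leave packet priorities unchanged. A remap such
 * as ICE_UP_TABLE_TRANSLATE(0, 7), by contrast, would place the value 0 in
 * the UP 7 field, steering UP 7 traffic to UP 0.
 */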

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
	u16 num_txq_per_tc, num_rxq_per_tc;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	bool ena_tc0 = false;
	u8 netdev_tc = 0;
	int i;

	/* at least TC0 should be enabled by default */
	if (vsi->tc_cfg.numtc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(0)))
			ena_tc0 = true;
	} else {
		ena_tc0 = true;
	}

	if (ena_tc0) {
		vsi->tc_cfg.numtc++;
		vsi->tc_cfg.ena_tc |= 1;
	}

	num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
	if (!num_rxq_per_tc)
		num_rxq_per_tc = 1;
	num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
	if (!num_txq_per_tc)
		num_txq_per_tc = 1;

	/* find the (rounded up) power-of-2 of qcount */
	pow = (u16)order_base_2(num_rxq_per_tc);

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits hold the queue offset for TC0; the next 4 bits
	 * hold the number of queues allocated to TC0, expressed as a
	 * power of 2.
	 *
	 * If a TC is not enabled, its queue offset is set to 0 and one queue
	 * is allocated, so that traffic for that TC is sent to the default
	 * queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */
	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
		vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			 ICE_AQ_VSI_TC_Q_NUM_M);
		offset += num_rxq_per_tc;
		tx_count += num_txq_per_tc;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* If offset is non-zero, it was calculated from the enabled TCs for
	 * this VSI. Otherwise, fall back to num_rxq_per_tc, which is always
	 * non-zero because it is derived from the VSI's allocated Rx queues,
	 * which is at least 1 (hence num_txq_per_tc is at least 1 as well).
	 */
	if (offset)
		vsi->num_rxq = offset;
	else
		vsi->num_rxq = num_rxq_per_tc;

	vsi->num_txq = tx_count;

	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
		dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
		/* since there is a chance that num_rxq could have been changed
		 * in the above for loop, make num_txq equal to num_rxq.
		 */
		vsi->num_txq = vsi->num_rxq;
	}

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
}
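
/* Worked example (editorial, hypothetical numbers): a VSI with 16 Rx queues
 * and 2 enabled TCs gets num_rxq_per_tc = 8 and pow = order_base_2(8) = 3,
 * so the loop above encodes TC0 as (offset 0, 2^3 queues) and TC1 as
 * (offset 8, 2^3 queues): eight queues starting at index 0, then eight
 * starting at index 8.
 */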

/**
 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 dflt_q_group, dflt_q_prio;
	u16 dflt_q, report_q, val;

	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
	    vsi->type != ICE_VSI_VF)
		return;

	val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
	ctxt->info.valid_sections |= cpu_to_le16(val);
	dflt_q = 0;
	dflt_q_group = 0;
	report_q = 0;
	dflt_q_prio = 0;

	/* enable flow director filtering/programming */
	val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
	ctxt->info.fd_options = cpu_to_le16(val);
	/* max of allocated flow director filters */
	ctxt->info.max_fd_fltr_dedicated =
		cpu_to_le16(vsi->num_gfltr);
	/* max of shared flow director filters any VSI may program */
	ctxt->info.max_fd_fltr_shared =
		cpu_to_le16(vsi->num_bfltr);
	/* default queue index within the VSI of the default FD */
	val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) &
	       ICE_AQ_VSI_FD_DEF_Q_M);
	/* target queue or queue group to the FD filter */
	val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) &
		ICE_AQ_VSI_FD_DEF_GRP_M);
	ctxt->info.fd_def_q = cpu_to_le16(val);
	/* queue index on which FD filter completion is reported */
	val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) &
	       ICE_AQ_VSI_FD_REPORT_Q_M);
	/* priority of the default qindex action */
	val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) &
		ICE_AQ_VSI_FD_DEF_PRIORITY_M);
	ctxt->info.fd_report_opt = cpu_to_le16(val);
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;
	struct device *dev;
	struct ice_pf *pf;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_VF:
		/* VF VSI gets a small RSS table, which is a VSI LUT type */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	default:
		dev_dbg(dev, "Unsupported VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		return;
	}

	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
				((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
				 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 * @init_vsi: is this call creating a VSI
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi_ctx *ctxt;
	struct device *dev;
	int ret = 0;

	dev = ice_pf_to_dev(pf);
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	switch (vsi->type) {
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
	case ICE_VSI_PF:
		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
		ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
		break;
	case ICE_VSI_VF:
		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
		/* VF number here is the absolute VF number (0-255) */
		ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		break;
	default:
		ret = -ENODEV;
		goto out;
	}

	ice_set_dflt_vsi_ctx(ctxt);
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
		ice_set_fd_vsi_ctx(ctxt, vsi);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
	    vsi->type != ICE_VSI_CTRL) {
		ice_set_rss_vsi_ctx(ctxt, vsi);
		/* if updating the VSI context, set valid_sections to
		 * indicate which section of the VSI context is being updated
		 */
		if (!init_vsi)
			ctxt->info.valid_sections |=
				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
	}

	ctxt->info.sw_id = vsi->port_info->sw_id;
	ice_vsi_setup_q_map(vsi, ctxt);
	if (!init_vsi) /* means VSI being updated */
		/* we must indicate which sections of the VSI context are
		 * being modified
		 */
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);

	/* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
	 * respectively
	 */
	if (vsi->type == ICE_VSI_VF) {
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt->info.sec_flags |=
				ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
				(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
		} else {
			ctxt->info.sec_flags &=
				~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
				  (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				   ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
		}
	}

	/* Allow control frames out of main VSI */
	if (vsi->type == ICE_VSI_PF) {
		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	}

	if (init_vsi) {
		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Add VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	} else {
		ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Update VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt->info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt->vsi_num;

out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_free_res - free a block of resources
 * @res: pointer to the resource
 * @index: starting index previously returned by ice_get_res
 * @id: identifier to track owner
 *
 * Returns number of resources freed
 */
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
{
	int count = 0;
	int i;

	if (!res || index >= res->end)
		return -EINVAL;

	id |= ICE_RES_VALID_BIT;
	for (i = index; i < res->end && res->list[i] == id; i++) {
		res->list[i] = 0;
		count++;
	}

	return count;
}

/**
 * ice_search_res - Search the tracker for a block of resources
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or -ENOMEM for error
 */
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
	u16 start = 0, end = 0;

	if (needed > res->end)
		return -ENOMEM;

	id |= ICE_RES_VALID_BIT;

	do {
		/* skip already allocated entries */
		if (res->list[end++] & ICE_RES_VALID_BIT) {
			start = end;
			if ((start + needed) > res->end)
				break;
		}

		if (end == (start + needed)) {
			int i = start;

			/* there was enough, so assign it to the requestor */
			while (i != end)
				res->list[i++] = id;

			return start;
		}
	} while (end < res->end);

	return -ENOMEM;
}

/**
 * ice_get_free_res_count - Get free count from a resource tracker
 * @res: Resource tracker instance
 */
static u16 ice_get_free_res_count(struct ice_res_tracker *res)
{
	u16 i, count = 0;

	for (i = 0; i < res->end; i++)
		if (!(res->list[i] & ICE_RES_VALID_BIT))
			count++;

	return count;
}

/**
 * ice_get_res - get a block of resources
 * @pf: board private structure
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or negative for error
 */
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
{
	if (!res || !pf)
		return -EINVAL;

	if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
		dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
			needed, res->num_entries, id);
		return -EINVAL;
	}

	return ice_search_res(res, needed, id);
}
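
/* Editorial sketch: typical use of the tracker API above is to reserve a
 * contiguous block and release the same block later with the same owner id
 * ('needed' and 'id' are placeholders):
 *
 *	int base = ice_get_res(pf, pf->irq_tracker, needed, id);
 *	if (base < 0)
 *		return base;	// no contiguous block of 'needed' entries
 *	...
 *	ice_free_res(pf->irq_tracker, (u16)base, id);
 */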

/**
 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after ice_vsi_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 */
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u16 num_q_vectors;
	int base;

	dev = ice_pf_to_dev(pf);
	/* SRIOV doesn't grab irq_tracker entries for each VSI */
	if (vsi->type == ICE_VSI_VF)
		return 0;

	if (vsi->base_vector) {
		dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
			vsi->vsi_num, vsi->base_vector);
		return -EEXIST;
	}

	num_q_vectors = vsi->num_q_vectors;
	/* reserve slots from OS requested IRQs */
	if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
		int i;

		ice_for_each_vf(pf, i) {
			struct ice_vf *vf = &pf->vf[i];

			if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) {
				base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
				break;
			}
		}
		if (i == pf->num_alloc_vfs)
			base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
					   ICE_RES_VF_CTRL_VEC_ID);
	} else {
		base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
				   vsi->idx);
	}

	if (base < 0) {
		dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
			ice_get_free_res_count(pf->irq_tracker),
			ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors);
		return -ENOENT;
	}
	vsi->base_vector = (u16)base;
	pf->num_avail_sw_msix -= num_q_vectors;

	return 0;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	/* Avoid stale references by clearing map from vector to ring */
	if (vsi->q_vectors) {
		ice_for_each_q_vector(vsi, i) {
			struct ice_q_vector *q_vector = vsi->q_vectors[i];

			if (q_vector) {
				q_vector->tx.ring = NULL;
				q_vector->rx.ring = NULL;
			}
		}
	}

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				WRITE_ONCE(vsi->tx_rings[i], NULL);
			}
		}
	}
	if (vsi->rx_rings) {
		for (i = 0; i < vsi->alloc_rxq; i++) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				WRITE_ONCE(vsi->rx_rings[i], NULL);
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u16 i;

	dev = ice_pf_to_dev(pf);
	/* Allocate Tx rings */
	for (i = 0; i < vsi->alloc_txq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->vsi = vsi;
		ring->tx_tstamps = &pf->ptp.port.tx;
		ring->dev = dev;
		ring->count = vsi->num_tx_desc;
		WRITE_ONCE(vsi->tx_rings[i], ring);
	}

	/* Allocate Rx rings */
	for (i = 0; i < vsi->alloc_rxq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = dev;
		ring->count = vsi->num_rx_desc;
		WRITE_ONCE(vsi->rx_rings[i], ring);
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_manage_rss_lut - disable/enable RSS
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 *
 * In the event of disable request for RSS, this function will zero out RSS
 * LUT, while in the event of enable request for RSS, it will reconfigure RSS
 * LUT.
 */
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
	u8 *lut;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return;

	if (ena) {
		if (vsi->rss_lut_user)
			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
		else
			ice_fill_rss_lut(lut, vsi->rss_table_size,
					 vsi->rss_size);
	}

	ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	kfree(lut);
}
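
/* Worked example (editorial): with rss_table_size = 8 and rss_size = 4,
 * the round-robin fill performed by ice_fill_rss_lut() would produce
 * lut = { 0, 1, 2, 3, 0, 1, 2, 3 }, while the disable path above writes
 * the zeroed LUT so every hash result lands on queue 0.
 */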

/**
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
 * @vsi: VSI to be configured
 */
static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u8 *lut, *key;
	int err;

	dev = ice_pf_to_dev(pf);
	vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);

	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	if (err) {
		dev_err(dev, "set_rss_lut failed, error %d\n", err);
		goto ice_vsi_cfg_rss_exit;
	}

	key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto ice_vsi_cfg_rss_exit;
	}

	if (vsi->rss_hkey_user)
		memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
	else
		netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);

	err = ice_set_rss_key(vsi, key);
	if (err)
		dev_err(dev, "set_rss_key failed, error %d\n", err);

	kfree(key);
ice_vsi_cfg_rss_exit:
	kfree(lut);
	return err;
}

/**
 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called during the VF VSI setup. Upon successful
 * completion of package download, this function will configure default RSS
 * input sets for VF VSI.
 */
static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi->vsi_num);
		return;
	}

	status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
	if (status)
		dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n",
			vsi->vsi_num, ice_stat_str(status));
}

/**
 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called after successful download package call
 * during initialization of PF. Since the downloaded package will erase the
 * RSS section, this function will configure RSS input sets for different
 * flow types. The last profile added has the highest priority, therefore 2
 * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles
 * (i.e. IPv4 src/dst TCP src/dst port).
 */
static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
{
	u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi_num);
		return;
	}
	/* configure RSS for IPv4 with input set IP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
				 ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));

	/* configure RSS for IPv6 with input set IPv6 src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
				 ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));

	/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
				 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));

	/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
				 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));

	/* configure RSS for sctp4 with input set IP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
				 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));

	/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
				 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));

	/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
				 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));

	/* configure RSS for sctp6 with input set IPv6 src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
				 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));
}

/**
 * ice_pf_state_is_nominal - checks the PF for nominal state
 * @pf: pointer to PF to check
 *
 * Check the PF's state for a collection of bits that would indicate
 * the PF is in a state that would inhibit normal operation for
 * driver functionality.
 *
 * Returns true if PF is in a nominal state, false otherwise
 */
bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };

	if (!pf)
		return false;

	bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
	if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
		return false;

	return true;
}

/**
 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 */
void ice_update_eth_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *prev_es, *cur_es;
	struct ice_hw *hw = &vsi->back->hw;
	u16 vsi_num = vsi->vsi_num;	/* HW absolute index of a VSI */

	prev_es = &vsi->eth_stats_prev;
	cur_es = &vsi->eth_stats;

	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_bytes, &cur_es->rx_bytes);

	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_unicast, &cur_es->rx_unicast);

	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_multicast, &cur_es->rx_multicast);

	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);

	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_discards, &cur_es->rx_discards);

	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_bytes, &cur_es->tx_bytes);

	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_unicast, &cur_es->tx_unicast);

	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_multicast, &cur_es->tx_multicast);

	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);

	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_errors, &cur_es->tx_errors);

	vsi->stat_offsets_loaded = true;
}

/**
 * ice_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be added
 * @action: filter action to be performed on match
 */
int
ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int err = 0;

	dev = ice_pf_to_dev(pf);

	if (!ice_fltr_add_vlan(vsi, vid, action)) {
		vsi->num_vlan++;
	} else {
		err = -ENODEV;
		dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
			vsi->vsi_num);
	}

	return err;
}

/**
 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be removed
 *
 * Returns 0 on success and negative on failure
 */
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	struct device *dev;
	int err = 0;

	dev = ice_pf_to_dev(pf);

	status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
	if (!status) {
		vsi->num_vlan--;
	} else if (status == ICE_ERR_DOES_NOT_EXIST) {
		dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %s\n",
			vid, vsi->vsi_num, ice_stat_str(status));
	} else {
		dev_err(dev, "Error removing VLAN %d on vsi %i error: %s\n",
			vid, vsi->vsi_num, ice_stat_str(status));
		err = -EIO;
	}

	return err;
}

/**
 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
 * @vsi: VSI
 */
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
{
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
		vsi->rx_buf_len = ICE_RXBUF_2048;
#if (PAGE_SIZE < 8192)
	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
#if (PAGE_SIZE < 8192)
		vsi->rx_buf_len = ICE_RXBUF_3072;
#else
		vsi->rx_buf_len = ICE_RXBUF_2048;
#endif
	}
}

/**
 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
 * @hw: HW pointer
 * @pf_q: index of the Rx queue in the PF's queue space
 * @rxdid: flexible descriptor RXDID
 * @prio: priority for the RXDID for this queue
 * @ena_ts: true to enable timestamp and false to disable timestamp
 */
void
ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
			bool ena_ts)
{
	int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));

	/* clear any previous values */
	regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
		    QRXFLXP_CNTXT_RXDID_PRIO_M |
		    QRXFLXP_CNTXT_TS_M);

	regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
		QRXFLXP_CNTXT_RXDID_IDX_M;

	regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
		QRXFLXP_CNTXT_RXDID_PRIO_M;

	if (ena_ts)
		/* Enable TimeSync on this queue */
		regval |= QRXFLXP_CNTXT_TS_M;

	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}
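
/* Worked example (editorial): ice_write_qrxflxp_cntxt(hw, pf_q, 2, 3, true)
 * clears the RXDID index, RXDID priority and timestamp fields of
 * QRXFLXP_CNTXT(pf_q), then writes back RXDID index 2, priority 3 and the
 * timestamp enable bit in a single register update.
 */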
1958 */ 1959 for (q = 0; q < q_vector->num_ring_tx; q++) { 1960 ice_cfg_txq_interrupt(vsi, txq, reg_idx, 1961 q_vector->tx.itr_idx); 1962 txq++; 1963 } 1964 1965 for (q = 0; q < q_vector->num_ring_rx; q++) { 1966 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx, 1967 q_vector->rx.itr_idx); 1968 rxq++; 1969 } 1970 } 1971 } 1972 1973 /** 1974 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx 1975 * @vsi: the VSI being changed 1976 */ 1977 int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) 1978 { 1979 struct ice_hw *hw = &vsi->back->hw; 1980 struct ice_vsi_ctx *ctxt; 1981 enum ice_status status; 1982 int ret = 0; 1983 1984 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 1985 if (!ctxt) 1986 return -ENOMEM; 1987 1988 /* Here we are configuring the VSI to let the driver add VLAN tags by 1989 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag 1990 * insertion happens in the Tx hot path, in ice_tx_map. 1991 */ 1992 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; 1993 1994 /* Preserve existing VLAN strip setting */ 1995 ctxt->info.vlan_flags |= (vsi->info.vlan_flags & 1996 ICE_AQ_VSI_VLAN_EMOD_M); 1997 1998 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 1999 2000 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 2001 if (status) { 2002 dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n", 2003 ice_stat_str(status), 2004 ice_aq_str(hw->adminq.sq_last_status)); 2005 ret = -EIO; 2006 goto out; 2007 } 2008 2009 vsi->info.vlan_flags = ctxt->info.vlan_flags; 2010 out: 2011 kfree(ctxt); 2012 return ret; 2013 } 2014 2015 /** 2016 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx 2017 * @vsi: the VSI being changed 2018 * @ena: boolean value indicating if this is an enable or disable request 2019 */ 2020 int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) 2021 { 2022 struct ice_hw *hw = &vsi->back->hw; 2023 struct ice_vsi_ctx *ctxt; 2024 enum ice_status status; 2025 int ret = 0; 2026 2027 /* do not allow modifying VLAN stripping when a port VLAN is configured 2028 * on this VSI 2029 */ 2030 if (vsi->info.pvid) 2031 return 0; 2032 2033 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 2034 if (!ctxt) 2035 return -ENOMEM; 2036 2037 /* Here we are configuring what the VSI should do with the VLAN tag in 2038 * the Rx packet. We can either leave the tag in the packet or put it in 2039 * the Rx descriptor. 2040 */ 2041 if (ena) 2042 /* Strip VLAN tag from Rx packet and put it in the desc */ 2043 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; 2044 else 2045 /* Disable stripping.
Leave tag in packet */ 2046 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; 2047 2048 /* Allow all packets untagged/tagged */ 2049 ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; 2050 2051 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 2052 2053 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 2054 if (status) { 2055 dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n", 2056 ena, ice_stat_str(status), 2057 ice_aq_str(hw->adminq.sq_last_status)); 2058 ret = -EIO; 2059 goto out; 2060 } 2061 2062 vsi->info.vlan_flags = ctxt->info.vlan_flags; 2063 out: 2064 kfree(ctxt); 2065 return ret; 2066 } 2067 2068 /** 2069 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings 2070 * @vsi: the VSI whose rings are to be enabled 2071 * 2072 * Returns 0 on success and a negative value on error 2073 */ 2074 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi) 2075 { 2076 return ice_vsi_ctrl_all_rx_rings(vsi, true); 2077 } 2078 2079 /** 2080 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings 2081 * @vsi: the VSI whose rings are to be disabled 2082 * 2083 * Returns 0 on success and a negative value on error 2084 */ 2085 int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi) 2086 { 2087 return ice_vsi_ctrl_all_rx_rings(vsi, false); 2088 } 2089 2090 /** 2091 * ice_vsi_stop_tx_rings - Disable Tx rings 2092 * @vsi: the VSI being configured 2093 * @rst_src: reset source 2094 * @rel_vmvf_num: Relative ID of VF/VM 2095 * @rings: Tx ring array to be stopped 2096 * @count: number of Tx ring array elements 2097 */ 2098 static int 2099 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2100 u16 rel_vmvf_num, struct ice_ring **rings, u16 count) 2101 { 2102 u16 q_idx; 2103 2104 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) 2105 return -EINVAL; 2106 2107 for (q_idx = 0; q_idx < count; q_idx++) { 2108 struct ice_txq_meta txq_meta = { }; 2109 int status; 2110 2111 if (!rings || !rings[q_idx]) 2112 return -EINVAL; 2113 2114 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); 2115 status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num, 2116 rings[q_idx], &txq_meta); 2117 2118 if (status) 2119 return status; 2120 } 2121 2122 return 0; 2123 } 2124 2125 /** 2126 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings 2127 * @vsi: the VSI being configured 2128 * @rst_src: reset source 2129 * @rel_vmvf_num: Relative ID of VF/VM 2130 */ 2131 int 2132 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2133 u16 rel_vmvf_num) 2134 { 2135 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq); 2136 } 2137 2138 /** 2139 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings 2140 * @vsi: the VSI being configured 2141 */ 2142 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi) 2143 { 2144 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq); 2145 } 2146 2147 /** 2148 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not 2149 * @vsi: VSI to check whether or not VLAN pruning is enabled. 2150 * 2151 * returns true if Rx VLAN pruning is enabled and false otherwise. 
2152 */ 2153 bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi) 2154 { 2155 if (!vsi) 2156 return false; 2157 2158 return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA); 2159 } 2160 2161 /** 2162 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI 2163 * @vsi: VSI to enable or disable VLAN pruning on 2164 * @ena: set to true to enable VLAN pruning and false to disable it 2165 * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode 2166 * 2167 * returns 0 if VSI is updated, negative otherwise 2168 */ 2169 int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) 2170 { 2171 struct ice_vsi_ctx *ctxt; 2172 struct ice_pf *pf; 2173 int status; 2174 2175 if (!vsi) 2176 return -EINVAL; 2177 2178 /* Don't enable VLAN pruning if the netdev is currently in promiscuous 2179 * mode. VLAN pruning will be enabled when the interface exits 2180 * promiscuous mode if any VLAN filters are active. 2181 */ 2182 if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena) 2183 return 0; 2184 2185 pf = vsi->back; 2186 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 2187 if (!ctxt) 2188 return -ENOMEM; 2189 2190 ctxt->info = vsi->info; 2191 2192 if (ena) 2193 ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 2194 else 2195 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 2196 2197 if (!vlan_promisc) 2198 ctxt->info.valid_sections = 2199 cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); 2200 2201 status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL); 2202 if (status) { 2203 netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n", 2204 ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, 2205 ice_stat_str(status), 2206 ice_aq_str(pf->hw.adminq.sq_last_status)); 2207 goto err_out; 2208 } 2209 2210 vsi->info.sw_flags2 = ctxt->info.sw_flags2; 2211 2212 kfree(ctxt); 2213 return 0; 2214 2215 err_out: 2216 kfree(ctxt); 2217 return -EIO; 2218 } 2219 2220 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) 2221 { 2222 struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg; 2223 2224 vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg); 2225 vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg); 2226 } 2227 2228 /** 2229 * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors 2230 * @vsi: VSI to set the q_vectors register index on 2231 */ 2232 static int 2233 ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi) 2234 { 2235 u16 i; 2236 2237 if (!vsi || !vsi->q_vectors) 2238 return -EINVAL; 2239 2240 ice_for_each_q_vector(vsi, i) { 2241 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2242 2243 if (!q_vector) { 2244 dev_err(ice_pf_to_dev(vsi->back), "Failed to set reg_idx on q_vector %d VSI %d\n", 2245 i, vsi->vsi_num); 2246 goto clear_reg_idx; 2247 } 2248 2249 if (vsi->type == ICE_VSI_VF) { 2250 struct ice_vf *vf = &vsi->back->vf[vsi->vf_id]; 2251 2252 q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector); 2253 } else { 2254 q_vector->reg_idx = 2255 q_vector->v_idx + vsi->base_vector; 2256 } 2257 } 2258 2259 return 0; 2260 2261 clear_reg_idx: 2262 ice_for_each_q_vector(vsi, i) { 2263 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2264 2265 if (q_vector) 2266 q_vector->reg_idx = 0; 2267 } 2268 2269 return -EINVAL; 2270 } 2271 2272 /** 2273 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling 2274 * @vsi: the VSI being configured 2275 * @tx: bool to determine Tx or Rx rule 2276 * @create: bool to determine create or remove Rule 2277 */ 2278 void ice_cfg_sw_lldp(struct 
ice_vsi *vsi, bool tx, bool create) 2279 { 2280 enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, 2281 enum ice_sw_fwd_act_type act); 2282 struct ice_pf *pf = vsi->back; 2283 enum ice_status status; 2284 struct device *dev; 2285 2286 dev = ice_pf_to_dev(pf); 2287 eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth; 2288 2289 if (tx) { 2290 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX, 2291 ICE_DROP_PACKET); 2292 } else { 2293 if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) { 2294 status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num, 2295 create); 2296 } else { 2297 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, 2298 ICE_FWD_TO_VSI); 2299 } 2300 } 2301 2302 if (status) 2303 dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n", 2304 create ? "adding" : "removing", tx ? "TX" : "RX", 2305 vsi->vsi_num, ice_stat_str(status)); 2306 } 2307 2308 /** 2309 * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it 2310 * @vsi: pointer to the VSI 2311 * 2312 * This function will allocate a new scheduler aggregator node if needed and 2313 * will move the specified VSI into it. 2314 */ 2315 static void ice_set_agg_vsi(struct ice_vsi *vsi) 2316 { 2317 struct device *dev = ice_pf_to_dev(vsi->back); 2318 struct ice_agg_node *agg_node_iter = NULL; 2319 u32 agg_id = ICE_INVALID_AGG_NODE_ID; 2320 struct ice_agg_node *agg_node = NULL; 2321 int node_offset, max_agg_nodes = 0; 2322 struct ice_port_info *port_info; 2323 struct ice_pf *pf = vsi->back; 2324 u32 agg_node_id_start = 0; 2325 enum ice_status status; 2326 2327 /* create (as needed) scheduler aggregator node and move VSI into 2328 * corresponding aggregator node 2329 * - the PF aggregator node contains VSIs of type _PF and _CTRL 2330 * - VF aggregator nodes will contain VF VSIs 2331 */ 2332 port_info = pf->hw.port_info; 2333 if (!port_info) 2334 return; 2335 2336 switch (vsi->type) { 2337 case ICE_VSI_CTRL: 2338 case ICE_VSI_LB: 2339 case ICE_VSI_PF: 2340 case ICE_VSI_SWITCHDEV_CTRL: 2341 max_agg_nodes = ICE_MAX_PF_AGG_NODES; 2342 agg_node_id_start = ICE_PF_AGG_NODE_ID_START; 2343 agg_node_iter = &pf->pf_agg_node[0]; 2344 break; 2345 case ICE_VSI_VF: 2346 /* A user can create 'n' VFs on a given PF, but an aggregator 2347 * node can have at most 64 children.
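 * For example (illustrative): with 100 VF VSIs and a 64-child limit, the
 * first 64 VF VSIs share one VF aggregator node and the remaining 36 land
 * in the next one.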
The code below therefore either 2348 * selects an agg_node that was already created, provided it still has 2349 * room (num_vsis < 64), or otherwise 2350 * picks the next available node, which will then be created 2351 */ 2352 max_agg_nodes = ICE_MAX_VF_AGG_NODES; 2353 agg_node_id_start = ICE_VF_AGG_NODE_ID_START; 2354 agg_node_iter = &pf->vf_agg_node[0]; 2355 break; 2356 default: 2357 /* other VSI type, handle later if needed */ 2358 dev_dbg(dev, "unexpected VSI type %s\n", 2359 ice_vsi_type_str(vsi->type)); 2360 return; 2361 } 2362 2363 /* find the appropriate aggregator node */ 2364 for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) { 2365 /* see if we can find space in previously created 2366 * node if num_vsis < 64, otherwise skip 2367 */ 2368 if (agg_node_iter->num_vsis && 2369 agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { 2370 agg_node_iter++; 2371 continue; 2372 } 2373 2374 if (agg_node_iter->valid && 2375 agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) { 2376 agg_id = agg_node_iter->agg_id; 2377 agg_node = agg_node_iter; 2378 break; 2379 } 2380 2381 /* find unclaimed agg_id */ 2382 if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) { 2383 agg_id = node_offset + agg_node_id_start; 2384 agg_node = agg_node_iter; 2385 break; 2386 } 2387 /* move to next agg_node */ 2388 agg_node_iter++; 2389 } 2390 2391 if (!agg_node) 2392 return; 2393 2394 /* if selected aggregator node was not created, create it */ 2395 if (!agg_node->valid) { 2396 status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG, 2397 (u8)vsi->tc_cfg.ena_tc); 2398 if (status) { 2399 dev_err(dev, "unable to create aggregator node with agg_id %u\n", 2400 agg_id); 2401 return; 2402 } 2403 /* aggregator node is created, store the needed info */ 2404 agg_node->valid = true; 2405 agg_node->agg_id = agg_id; 2406 } 2407 2408 /* move VSI to corresponding aggregator node */ 2409 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx, 2410 (u8)vsi->tc_cfg.ena_tc); 2411 if (status) { 2412 dev_err(dev, "unable to move VSI idx %u into aggregator node %u\n", 2413 vsi->idx, agg_id); 2414 return; 2415 } 2416 2417 /* keep active children count for aggregator node */ 2418 agg_node->num_vsis++; 2419 2420 /* cache the 'agg_id' in VSI, so that after a reset the VSI will be 2421 * moved back to its aggregator node 2422 */ 2423 vsi->agg_node = agg_node; 2424 dev_dbg(dev, "successfully moved VSI idx %u (tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n", 2425 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id, 2426 vsi->agg_node->num_vsis); 2427 } 2428 2429 /** 2430 * ice_vsi_setup - Set up a VSI by a given type 2431 * @pf: board private structure 2432 * @pi: pointer to the port_info instance 2433 * @vsi_type: VSI type 2434 * @vf_id: defines the VF ID to which this VSI connects. This field is meant to 2435 * be used only for the ICE_VSI_VF VSI type. For other VSI types, callers 2436 * should pass ICE_INVAL_VFID. 2437 * 2438 * This allocates the sw VSI structure and its queue resources. 2439 * 2440 * Returns a pointer to the successfully allocated and configured VSI sw struct 2441 * on success, NULL on failure.
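 * A typical PF call might look like (illustrative):
 *
 *	vsi = ice_vsi_setup(pf, pf->hw.port_info, ICE_VSI_PF, ICE_INVAL_VFID);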
2442 */ 2443 struct ice_vsi * 2444 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, 2445 enum ice_vsi_type vsi_type, u16 vf_id) 2446 { 2447 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2448 struct device *dev = ice_pf_to_dev(pf); 2449 enum ice_status status; 2450 struct ice_vsi *vsi; 2451 int ret, i; 2452 2453 if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL) 2454 vsi = ice_vsi_alloc(pf, vsi_type, vf_id); 2455 else 2456 vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID); 2457 2458 if (!vsi) { 2459 dev_err(dev, "could not allocate VSI\n"); 2460 return NULL; 2461 } 2462 2463 vsi->port_info = pi; 2464 vsi->vsw = pf->first_sw; 2465 if (vsi->type == ICE_VSI_PF) 2466 vsi->ethtype = ETH_P_PAUSE; 2467 2468 if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_CTRL) 2469 vsi->vf_id = vf_id; 2470 2471 ice_alloc_fd_res(vsi); 2472 2473 if (ice_vsi_get_qs(vsi)) { 2474 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", 2475 vsi->idx); 2476 goto unroll_vsi_alloc; 2477 } 2478 2479 /* set RSS capabilities */ 2480 ice_vsi_set_rss_params(vsi); 2481 2482 /* set TC configuration */ 2483 ice_vsi_set_tc_cfg(vsi); 2484 2485 /* create the VSI */ 2486 ret = ice_vsi_init(vsi, true); 2487 if (ret) 2488 goto unroll_get_qs; 2489 2490 switch (vsi->type) { 2491 case ICE_VSI_CTRL: 2492 case ICE_VSI_SWITCHDEV_CTRL: 2493 case ICE_VSI_PF: 2494 ret = ice_vsi_alloc_q_vectors(vsi); 2495 if (ret) 2496 goto unroll_vsi_init; 2497 2498 ret = ice_vsi_setup_vector_base(vsi); 2499 if (ret) 2500 goto unroll_alloc_q_vector; 2501 2502 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 2503 if (ret) 2504 goto unroll_vector_base; 2505 2506 ret = ice_vsi_alloc_rings(vsi); 2507 if (ret) 2508 goto unroll_vector_base; 2509 2510 /* Always add VLAN ID 0 switch rule by default. This is needed 2511 * in order to allow all untagged and 0 tagged priority traffic 2512 * if Rx VLAN pruning is enabled. Also there are cases where we 2513 * don't get the call to add VLAN 0 via ice_vlan_rx_add_vid() 2514 * so this handles those cases (i.e. adding the PF to a bridge 2515 * without the 8021q module loaded). 2516 */ 2517 ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI); 2518 if (ret) 2519 goto unroll_clear_rings; 2520 2521 ice_vsi_map_rings_to_vectors(vsi); 2522 2523 /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ 2524 if (vsi->type != ICE_VSI_CTRL) 2525 /* Do not exit if configuring RSS had an issue, at 2526 * least receive traffic on first queue. Hence no 2527 * need to capture return value 2528 */ 2529 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2530 ice_vsi_cfg_rss_lut_key(vsi); 2531 ice_vsi_set_rss_flow_fld(vsi); 2532 } 2533 ice_init_arfs(vsi); 2534 break; 2535 case ICE_VSI_VF: 2536 /* VF driver will take care of creating netdev for this type and 2537 * map queues to vectors through Virtchnl, PF driver only 2538 * creates a VSI and corresponding structures for bookkeeping 2539 * purpose 2540 */ 2541 ret = ice_vsi_alloc_q_vectors(vsi); 2542 if (ret) 2543 goto unroll_vsi_init; 2544 2545 ret = ice_vsi_alloc_rings(vsi); 2546 if (ret) 2547 goto unroll_alloc_q_vector; 2548 2549 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 2550 if (ret) 2551 goto unroll_vector_base; 2552 2553 /* Do not exit if configuring RSS had an issue, at least 2554 * receive traffic on first queue. 
Hence no need to capture 2555 * return value 2556 */ 2557 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2558 ice_vsi_cfg_rss_lut_key(vsi); 2559 ice_vsi_set_vf_rss_flow_fld(vsi); 2560 } 2561 break; 2562 case ICE_VSI_LB: 2563 ret = ice_vsi_alloc_rings(vsi); 2564 if (ret) 2565 goto unroll_vsi_init; 2566 break; 2567 default: 2568 /* clean up the resources and exit */ 2569 goto unroll_vsi_init; 2570 } 2571 2572 /* configure VSI nodes based on number of queues and TC's */ 2573 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2574 max_txqs[i] = vsi->alloc_txq; 2575 2576 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2577 max_txqs); 2578 if (status) { 2579 dev_err(dev, "VSI %d failed lan queue config, error %s\n", 2580 vsi->vsi_num, ice_stat_str(status)); 2581 goto unroll_clear_rings; 2582 } 2583 2584 /* Add a switch rule of lookup type ETHERTYPE to drop all Tx flow 2585 * control frames coming from VSIs, restricting a malicious VF from 2586 * sending out PAUSE or PFC frames. If enabled, FW can still send FC 2587 * frames. The rule is added once for the PF VSI in order to create the 2588 * appropriate recipe, since the VSI/VSI list is ignored with the drop 2589 * action... Also add rules to handle LLDP Tx packets. Tx LLDP packets 2590 * need to be dropped so that VFs cannot send LLDP packets to reconfig 2591 * DCB settings in the HW. 2592 */ 2593 if (!ice_is_safe_mode(pf)) 2594 if (vsi->type == ICE_VSI_PF) { 2595 ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, 2596 ICE_DROP_PACKET); 2597 ice_cfg_sw_lldp(vsi, true, true); 2598 } 2599 2600 if (!vsi->agg_node) 2601 ice_set_agg_vsi(vsi); 2602 return vsi; 2603 2604 unroll_clear_rings: 2605 ice_vsi_clear_rings(vsi); 2606 unroll_vector_base: 2607 /* reclaim SW interrupts back to the common pool */ 2608 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); 2609 pf->num_avail_sw_msix += vsi->num_q_vectors; 2610 unroll_alloc_q_vector: 2611 ice_vsi_free_q_vectors(vsi); 2612 unroll_vsi_init: 2613 ice_vsi_delete(vsi); 2614 unroll_get_qs: 2615 ice_vsi_put_qs(vsi); 2616 unroll_vsi_alloc: 2617 if (vsi_type == ICE_VSI_VF) 2618 ice_enable_lag(pf->lag); 2619 ice_vsi_clear(vsi); 2620 2621 return NULL; 2622 } 2623 2624 /** 2625 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW 2626 * @vsi: the VSI being cleaned up 2627 */ 2628 static void ice_vsi_release_msix(struct ice_vsi *vsi) 2629 { 2630 struct ice_pf *pf = vsi->back; 2631 struct ice_hw *hw = &pf->hw; 2632 u32 txq = 0; 2633 u32 rxq = 0; 2634 int i, q; 2635 2636 for (i = 0; i < vsi->num_q_vectors; i++) { 2637 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2638 2639 ice_write_intrl(q_vector, 0); 2640 for (q = 0; q < q_vector->num_ring_tx; q++) { 2641 ice_write_itr(&q_vector->tx, 0); 2642 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); 2643 if (ice_is_xdp_ena_vsi(vsi)) { 2644 u32 xdp_txq = txq + vsi->num_xdp_txq; 2645 2646 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); 2647 } 2648 txq++; 2649 } 2650 2651 for (q = 0; q < q_vector->num_ring_rx; q++) { 2652 ice_write_itr(&q_vector->rx, 0); 2653 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); 2654 rxq++; 2655 } 2656 } 2657 2658 ice_flush(hw); 2659 } 2660 2661 /** 2662 * ice_vsi_free_irq - Free the IRQ association with the OS 2663 * @vsi: the VSI being configured 2664 */ 2665 void ice_vsi_free_irq(struct ice_vsi *vsi) 2666 { 2667 struct ice_pf *pf = vsi->back; 2668 int base = vsi->base_vector; 2669 int i; 2670 2671 if (!vsi->q_vectors || !vsi->irqs_ready) 2672 return; 2673 2674 ice_vsi_release_msix(vsi); 2675 if (vsi->type == ICE_VSI_VF) 2676 return; 2677 2678
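/* In the loop below, every IRQ that was actually requested has its affinity
 * notifier and affinity hint cleared, is synchronized, and is then freed
 * back with devm_free_irq().
 */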
vsi->irqs_ready = false; 2679 ice_for_each_q_vector(vsi, i) { 2680 u16 vector = i + base; 2681 int irq_num; 2682 2683 irq_num = pf->msix_entries[vector].vector; 2684 2685 /* free only the irqs that were actually requested */ 2686 if (!vsi->q_vectors[i] || 2687 !(vsi->q_vectors[i]->num_ring_tx || 2688 vsi->q_vectors[i]->num_ring_rx)) 2689 continue; 2690 2691 /* clear the affinity notifier in the IRQ descriptor */ 2692 irq_set_affinity_notifier(irq_num, NULL); 2693 2694 /* clear the affinity_mask in the IRQ descriptor */ 2695 irq_set_affinity_hint(irq_num, NULL); 2696 synchronize_irq(irq_num); 2697 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); 2698 } 2699 } 2700 2701 /** 2702 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues 2703 * @vsi: the VSI having resources freed 2704 */ 2705 void ice_vsi_free_tx_rings(struct ice_vsi *vsi) 2706 { 2707 int i; 2708 2709 if (!vsi->tx_rings) 2710 return; 2711 2712 ice_for_each_txq(vsi, i) 2713 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 2714 ice_free_tx_ring(vsi->tx_rings[i]); 2715 } 2716 2717 /** 2718 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues 2719 * @vsi: the VSI having resources freed 2720 */ 2721 void ice_vsi_free_rx_rings(struct ice_vsi *vsi) 2722 { 2723 int i; 2724 2725 if (!vsi->rx_rings) 2726 return; 2727 2728 ice_for_each_rxq(vsi, i) 2729 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) 2730 ice_free_rx_ring(vsi->rx_rings[i]); 2731 } 2732 2733 /** 2734 * ice_vsi_close - Shut down a VSI 2735 * @vsi: the VSI being shut down 2736 */ 2737 void ice_vsi_close(struct ice_vsi *vsi) 2738 { 2739 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) 2740 ice_down(vsi); 2741 2742 ice_vsi_free_irq(vsi); 2743 ice_vsi_free_tx_rings(vsi); 2744 ice_vsi_free_rx_rings(vsi); 2745 } 2746 2747 /** 2748 * ice_ena_vsi - resume a VSI 2749 * @vsi: the VSI being resumed 2750 * @locked: is the rtnl_lock already held 2751 */ 2752 int ice_ena_vsi(struct ice_vsi *vsi, bool locked) 2753 { 2754 int err = 0; 2755 2756 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state)) 2757 return 0; 2758 2759 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 2760 2761 if (vsi->netdev && vsi->type == ICE_VSI_PF) { 2762 if (netif_running(vsi->netdev)) { 2763 if (!locked) 2764 rtnl_lock(); 2765 2766 err = ice_open_internal(vsi->netdev); 2767 2768 if (!locked) 2769 rtnl_unlock(); 2770 } 2771 } else if (vsi->type == ICE_VSI_CTRL) { 2772 err = ice_vsi_open_ctrl(vsi); 2773 } 2774 2775 return err; 2776 } 2777 2778 /** 2779 * ice_dis_vsi - pause a VSI 2780 * @vsi: the VSI being paused 2781 * @locked: is the rtnl_lock already held 2782 */ 2783 void ice_dis_vsi(struct ice_vsi *vsi, bool locked) 2784 { 2785 if (test_bit(ICE_VSI_DOWN, vsi->state)) 2786 return; 2787 2788 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 2789 2790 if (vsi->type == ICE_VSI_PF && vsi->netdev) { 2791 if (netif_running(vsi->netdev)) { 2792 if (!locked) 2793 rtnl_lock(); 2794 2795 ice_vsi_close(vsi); 2796 2797 if (!locked) 2798 rtnl_unlock(); 2799 } else { 2800 ice_vsi_close(vsi); 2801 } 2802 } else if (vsi->type == ICE_VSI_CTRL || 2803 vsi->type == ICE_VSI_SWITCHDEV_CTRL) { 2804 ice_vsi_close(vsi); 2805 } 2806 } 2807 2808 /** 2809 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI 2810 * @vsi: the VSI being un-configured 2811 */ 2812 void ice_vsi_dis_irq(struct ice_vsi *vsi) 2813 { 2814 int base = vsi->base_vector; 2815 struct ice_pf *pf = vsi->back; 2816 struct ice_hw *hw = &pf->hw; 2817 u32 val; 2818 int i; 2819 2820 /* disable interrupt causation from each queue */ 2821 if (vsi->tx_rings)
{ 2822 ice_for_each_txq(vsi, i) { 2823 if (vsi->tx_rings[i]) { 2824 u16 reg; 2825 2826 reg = vsi->tx_rings[i]->reg_idx; 2827 val = rd32(hw, QINT_TQCTL(reg)); 2828 val &= ~QINT_TQCTL_CAUSE_ENA_M; 2829 wr32(hw, QINT_TQCTL(reg), val); 2830 } 2831 } 2832 } 2833 2834 if (vsi->rx_rings) { 2835 ice_for_each_rxq(vsi, i) { 2836 if (vsi->rx_rings[i]) { 2837 u16 reg; 2838 2839 reg = vsi->rx_rings[i]->reg_idx; 2840 val = rd32(hw, QINT_RQCTL(reg)); 2841 val &= ~QINT_RQCTL_CAUSE_ENA_M; 2842 wr32(hw, QINT_RQCTL(reg), val); 2843 } 2844 } 2845 } 2846 2847 /* disable each interrupt */ 2848 ice_for_each_q_vector(vsi, i) { 2849 if (!vsi->q_vectors[i]) 2850 continue; 2851 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); 2852 } 2853 2854 ice_flush(hw); 2855 2856 /* don't call synchronize_irq() for VFs from the host */ 2857 if (vsi->type == ICE_VSI_VF) 2858 return; 2859 2860 ice_for_each_q_vector(vsi, i) 2861 synchronize_irq(pf->msix_entries[i + base].vector); 2862 } 2863 2864 /** 2865 * ice_napi_del - Remove NAPI handler for the VSI 2866 * @vsi: VSI for which NAPI handler is to be removed 2867 */ 2868 void ice_napi_del(struct ice_vsi *vsi) 2869 { 2870 int v_idx; 2871 2872 if (!vsi->netdev) 2873 return; 2874 2875 ice_for_each_q_vector(vsi, v_idx) 2876 netif_napi_del(&vsi->q_vectors[v_idx]->napi); 2877 } 2878 2879 /** 2880 * ice_vsi_release - Delete a VSI and free its resources 2881 * @vsi: the VSI being removed 2882 * 2883 * Returns 0 on success or < 0 on error 2884 */ 2885 int ice_vsi_release(struct ice_vsi *vsi) 2886 { 2887 struct ice_pf *pf; 2888 2889 if (!vsi->back) 2890 return -ENODEV; 2891 pf = vsi->back; 2892 2893 /* do not unregister while driver is in the reset recovery pending 2894 * state. Since reset/rebuild happens through the PF service task 2895 * workqueue, it's not a good idea to unregister a netdev that is 2896 * associated with the PF that is currently running the work queue 2897 * items. This is done to avoid a check_flush_dependency() warning 2898 * on this wq 2899 */ 2899 if (vsi->netdev && !ice_is_reset_in_progress(pf->state) && 2900 (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state))) { 2901 unregister_netdev(vsi->netdev); 2902 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 2903 } 2904 2905 if (vsi->type == ICE_VSI_PF) 2906 ice_devlink_destroy_pf_port(pf); 2907 2908 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2909 ice_rss_clean(vsi); 2910 2911 /* Disable VSI and free resources */ 2912 if (vsi->type != ICE_VSI_LB) 2913 ice_vsi_dis_irq(vsi); 2914 ice_vsi_close(vsi); 2915 2916 /* SR-IOV determines needed MSIX resources all at once instead of per 2917 * VSI since when VFs are spawned we know how many VFs there are and how 2918 * many interrupts each VF needs. SR-IOV MSIX resources are also 2919 * cleared in the same manner.
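 * As a consequence (noted for clarity), the vectors of a VF control VSI are
 * reclaimed below only once no other VF still owns a control VSI.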
2920 */ 2921 if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) { 2922 int i; 2923 2924 ice_for_each_vf(pf, i) { 2925 struct ice_vf *vf = &pf->vf[i]; 2926 2927 if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) 2928 break; 2929 } 2930 if (i == pf->num_alloc_vfs) { 2931 /* No other VFs left that have control VSI, reclaim SW 2932 * interrupts back to the common pool 2933 */ 2934 ice_free_res(pf->irq_tracker, vsi->base_vector, 2935 ICE_RES_VF_CTRL_VEC_ID); 2936 pf->num_avail_sw_msix += vsi->num_q_vectors; 2937 } 2938 } else if (vsi->type != ICE_VSI_VF) { 2939 /* reclaim SW interrupts back to the common pool */ 2940 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); 2941 pf->num_avail_sw_msix += vsi->num_q_vectors; 2942 } 2943 2944 if (!ice_is_safe_mode(pf)) { 2945 if (vsi->type == ICE_VSI_PF) { 2946 ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, 2947 ICE_DROP_PACKET); 2948 ice_cfg_sw_lldp(vsi, true, false); 2949 /* The Rx rule to remove will only exist if the LLDP FW 2950 * engine is currently stopped 2951 */ 2952 if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) 2953 ice_cfg_sw_lldp(vsi, false, false); 2954 } 2955 } 2956 2957 ice_fltr_remove_all(vsi); 2958 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); 2959 ice_vsi_delete(vsi); 2960 ice_vsi_free_q_vectors(vsi); 2961 2962 if (vsi->netdev) { 2963 if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) { 2964 unregister_netdev(vsi->netdev); 2965 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 2966 } 2967 if (test_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state)) { 2968 free_netdev(vsi->netdev); 2969 vsi->netdev = NULL; 2970 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 2971 } 2972 } 2973 2974 if (vsi->type == ICE_VSI_VF && 2975 vsi->agg_node && vsi->agg_node->valid) 2976 vsi->agg_node->num_vsis--; 2977 ice_vsi_clear_rings(vsi); 2978 2979 ice_vsi_put_qs(vsi); 2980 2981 /* retain SW VSI data structure since it is needed to unregister and 2982 * free VSI netdev when PF is not in reset recovery pending state, 2983 * e.g. during rmmod 2984 */ 2985 if (!ice_is_reset_in_progress(pf->state)) 2986 ice_vsi_clear(vsi); 2987 2988 return 0; 2989 } 2990 2991 /** 2992 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors 2993 * @vsi: VSI connected with q_vectors 2994 * @coalesce: array of struct with stored coalesce 2995 * 2996 * Returns array size. 2997 */ 2998 static int 2999 ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi, 3000 struct ice_coalesce_stored *coalesce) 3001 { 3002 int i; 3003 3004 ice_for_each_q_vector(vsi, i) { 3005 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 3006 3007 coalesce[i].itr_tx = q_vector->tx.itr_setting; 3008 coalesce[i].itr_rx = q_vector->rx.itr_setting; 3009 coalesce[i].intrl = q_vector->intrl; 3010 3011 if (i < vsi->num_txq) 3012 coalesce[i].tx_valid = true; 3013 if (i < vsi->num_rxq) 3014 coalesce[i].rx_valid = true; 3015 } 3016 3017 return vsi->num_q_vectors; 3018 } 3019 3020 /** 3021 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays 3022 * @vsi: VSI connected with q_vectors 3023 * @coalesce: pointer to array of struct with stored coalesce 3024 * @size: size of coalesce array 3025 * 3026 * Before this function, ice_vsi_rebuild_get_coalesce should be called to save 3027 * ITR params in arrays. If size is 0 or coalesce wasn't stored, set coalesce 3028 * to the default values.
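 * For example (illustrative): if a VSI is rebuilt from 4 to 8 q_vectors,
 * entries 0-3 restore their saved ITR/INTRL values while entries 4-7 inherit
 * the values saved in entry 0.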
3029 */ 3030 static void 3031 ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, 3032 struct ice_coalesce_stored *coalesce, int size) 3033 { 3034 struct ice_ring_container *rc; 3035 int i; 3036 3037 if ((size && !coalesce) || !vsi) 3038 return; 3039 3040 /* There are a couple of cases that have to be handled here: 3041 * 1. The case where the number of queue vectors stays the same, but 3042 * the number of Tx or Rx rings changes (the first for loop) 3043 * 2. The case where the number of queue vectors increased (the 3044 * second for loop) 3045 */ 3046 for (i = 0; i < size && i < vsi->num_q_vectors; i++) { 3047 /* There are 2 cases to handle here and they are the same for 3048 * both Tx and Rx: 3049 * if the entry was valid previously (coalesce[i].[tr]x_valid) 3050 * and the loop variable is less than the number of rings 3051 * allocated, then write the previous values 3052 * 3053 * if the entry was not valid previously, but the loop variable 3054 * is still within the newly allocated ring count (meaning the 3055 * number of rings increased from before), then write out the 3056 * values from the first element 3057 * 3058 * Also, always write the ITR, even if ITR_IS_DYNAMIC is set; 3059 * there is no harm because the dynamic algorithm 3060 * will just overwrite it. 3061 */ 3062 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) { 3063 rc = &vsi->q_vectors[i]->rx; 3064 rc->itr_setting = coalesce[i].itr_rx; 3065 ice_write_itr(rc, rc->itr_setting); 3066 } else if (i < vsi->alloc_rxq) { 3067 rc = &vsi->q_vectors[i]->rx; 3068 rc->itr_setting = coalesce[0].itr_rx; 3069 ice_write_itr(rc, rc->itr_setting); 3070 } 3071 3072 if (i < vsi->alloc_txq && coalesce[i].tx_valid) { 3073 rc = &vsi->q_vectors[i]->tx; 3074 rc->itr_setting = coalesce[i].itr_tx; 3075 ice_write_itr(rc, rc->itr_setting); 3076 } else if (i < vsi->alloc_txq) { 3077 rc = &vsi->q_vectors[i]->tx; 3078 rc->itr_setting = coalesce[0].itr_tx; 3079 ice_write_itr(rc, rc->itr_setting); 3080 } 3081 3082 vsi->q_vectors[i]->intrl = coalesce[i].intrl; 3083 ice_write_intrl(vsi->q_vectors[i], coalesce[i].intrl); 3084 } 3085 3086 /* the number of queue vectors increased so write whatever is in 3087 * the first element 3088 */ 3089 for (; i < vsi->num_q_vectors; i++) { 3090 /* transmit */ 3091 rc = &vsi->q_vectors[i]->tx; 3092 rc->itr_setting = coalesce[0].itr_tx; 3093 ice_write_itr(rc, rc->itr_setting); 3094 3095 /* receive */ 3096 rc = &vsi->q_vectors[i]->rx; 3097 rc->itr_setting = coalesce[0].itr_rx; 3098 ice_write_itr(rc, rc->itr_setting); 3099 3100 vsi->q_vectors[i]->intrl = coalesce[0].intrl; 3101 ice_write_intrl(vsi->q_vectors[i], coalesce[0].intrl); 3102 } 3103 } 3104 3105 /** 3106 * ice_vsi_rebuild - Rebuild VSI after reset 3107 * @vsi: VSI to be rebuilt 3108 * @init_vsi: is this an initialization or a reconfigure of the VSI 3109 * 3110 * Returns 0 on success and a negative value on failure 3111 */ 3112 int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) 3113 { 3114 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 3115 struct ice_coalesce_stored *coalesce; 3116 int prev_num_q_vectors = 0; 3117 struct ice_vf *vf = NULL; 3118 enum ice_vsi_type vtype; 3119 enum ice_status status; 3120 struct ice_pf *pf; 3121 int ret, i; 3122 3123 if (!vsi) 3124 return -EINVAL; 3125 3126 pf = vsi->back; 3127 vtype = vsi->type; 3128 if (vtype == ICE_VSI_VF) 3129 vf = &pf->vf[vsi->vf_id]; 3130 3131 coalesce = kcalloc(vsi->num_q_vectors, 3132 sizeof(struct ice_coalesce_stored), GFP_KERNEL); 3133 if (!coalesce) 3134 return -ENOMEM; 3135 3136 prev_num_q_vectors =
ice_vsi_rebuild_get_coalesce(vsi, coalesce); 3137 3138 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); 3139 ice_vsi_free_q_vectors(vsi); 3140 3141 /* SR-IOV determines needed MSIX resources all at once instead of per 3142 * VSI since when VFs are spawned we know how many VFs there are and how 3143 * many interrupts each VF needs. SR-IOV MSIX resources are also 3144 * cleared in the same manner. 3145 */ 3146 if (vtype != ICE_VSI_VF) { 3147 /* reclaim SW interrupts back to the common pool */ 3148 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); 3149 pf->num_avail_sw_msix += vsi->num_q_vectors; 3150 vsi->base_vector = 0; 3151 } 3152 3153 if (ice_is_xdp_ena_vsi(vsi)) 3154 /* return value check can be skipped here, it always returns 3155 * 0 if reset is in progress 3156 */ 3157 ice_destroy_xdp_rings(vsi); 3158 ice_vsi_put_qs(vsi); 3159 ice_vsi_clear_rings(vsi); 3160 ice_vsi_free_arrays(vsi); 3161 if (vtype == ICE_VSI_VF) 3162 ice_vsi_set_num_qs(vsi, vf->vf_id); 3163 else 3164 ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); 3165 3166 ret = ice_vsi_alloc_arrays(vsi); 3167 if (ret < 0) 3168 goto err_vsi; 3169 3170 ice_vsi_get_qs(vsi); 3171 3172 ice_alloc_fd_res(vsi); 3173 ice_vsi_set_tc_cfg(vsi); 3174 3175 /* Initialize VSI struct elements and create VSI in FW */ 3176 ret = ice_vsi_init(vsi, init_vsi); 3177 if (ret < 0) 3178 goto err_vsi; 3179 3180 switch (vtype) { 3181 case ICE_VSI_CTRL: 3182 case ICE_VSI_SWITCHDEV_CTRL: 3183 case ICE_VSI_PF: 3184 ret = ice_vsi_alloc_q_vectors(vsi); 3185 if (ret) 3186 goto err_rings; 3187 3188 ret = ice_vsi_setup_vector_base(vsi); 3189 if (ret) 3190 goto err_vectors; 3191 3192 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 3193 if (ret) 3194 goto err_vectors; 3195 3196 ret = ice_vsi_alloc_rings(vsi); 3197 if (ret) 3198 goto err_vectors; 3199 3200 ice_vsi_map_rings_to_vectors(vsi); 3201 if (ice_is_xdp_ena_vsi(vsi)) { 3202 vsi->num_xdp_txq = vsi->alloc_rxq; 3203 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); 3204 if (ret) 3205 goto err_vectors; 3206 } 3207 /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ 3208 if (vtype != ICE_VSI_CTRL) 3209 /* Do not exit if configuring RSS had an issue, at 3210 * least receive traffic on first queue. 
Hence no 3211 * need to capture return value 3212 */ 3213 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 3214 ice_vsi_cfg_rss_lut_key(vsi); 3215 break; 3216 case ICE_VSI_VF: 3217 ret = ice_vsi_alloc_q_vectors(vsi); 3218 if (ret) 3219 goto err_rings; 3220 3221 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 3222 if (ret) 3223 goto err_vectors; 3224 3225 ret = ice_vsi_alloc_rings(vsi); 3226 if (ret) 3227 goto err_vectors; 3228 3229 break; 3230 default: 3231 break; 3232 } 3233 3234 /* configure VSI nodes based on number of queues and TC's */ 3235 for (i = 0; i < vsi->tc_cfg.numtc; i++) { 3236 max_txqs[i] = vsi->alloc_txq; 3237 3238 if (ice_is_xdp_ena_vsi(vsi)) 3239 max_txqs[i] += vsi->num_xdp_txq; 3240 } 3241 3242 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 3243 max_txqs); 3244 if (status) { 3245 dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n", 3246 vsi->vsi_num, ice_stat_str(status)); 3247 if (init_vsi) { 3248 ret = -EIO; 3249 goto err_vectors; 3250 } else { /* free the saved coalesce settings before handing off to the reset path so they are not leaked */ kfree(coalesce); 3251 return ice_schedule_reset(pf, ICE_RESET_PFR); 3252 } 3253 } 3254 ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors); 3255 kfree(coalesce); 3256 3257 return 0; 3258 3259 err_vectors: 3260 ice_vsi_free_q_vectors(vsi); 3261 err_rings: 3262 if (vsi->netdev) { 3263 vsi->current_netdev_flags = 0; 3264 unregister_netdev(vsi->netdev); 3265 free_netdev(vsi->netdev); 3266 vsi->netdev = NULL; 3267 } 3268 err_vsi: 3269 ice_vsi_clear(vsi); 3270 set_bit(ICE_RESET_FAILED, pf->state); 3271 kfree(coalesce); 3272 return ret; 3273 } 3274 3275 /** 3276 * ice_is_reset_in_progress - check for a reset in progress 3277 * @state: PF state field 3278 */ 3279 bool ice_is_reset_in_progress(unsigned long *state) 3280 { 3281 return test_bit(ICE_RESET_OICR_RECV, state) || 3282 test_bit(ICE_PFR_REQ, state) || 3283 test_bit(ICE_CORER_REQ, state) || 3284 test_bit(ICE_GLOBR_REQ, state); 3285 } 3286 3287 /** 3288 * ice_wait_for_reset - Wait for driver to finish reset and rebuild 3289 * @pf: pointer to the PF structure 3290 * @timeout: length of time to wait, in jiffies 3291 * 3292 * Wait (sleep) for a short time until the driver finishes cleaning up from 3293 * a device reset. The caller must be able to sleep. Use this to delay 3294 * operations that could fail while the driver is cleaning up after a device 3295 * reset. 3296 * 3297 * Returns 0 on success, -EBUSY if the reset is not finished within the 3298 * timeout, and -ERESTARTSYS if the thread was interrupted.
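 * A minimal usage sketch (illustrative): a caller that can tolerate a short
 * delay might wait up to ten seconds for the rebuild to finish:
 *
 *	err = ice_wait_for_reset(pf, 10 * HZ);
 *	if (err)
 *		return err;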
3299 */ 3300 int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout) 3301 { 3302 long ret; 3303 3304 ret = wait_event_interruptible_timeout(pf->reset_wait_queue, 3305 !ice_is_reset_in_progress(pf->state), 3306 timeout); 3307 if (ret < 0) 3308 return ret; 3309 else if (!ret) 3310 return -EBUSY; 3311 else 3312 return 0; 3313 } 3314 3315 #ifdef CONFIG_DCB 3316 /** 3317 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map 3318 * @vsi: VSI being configured 3319 * @ctx: the context buffer returned from AQ VSI update command 3320 */ 3321 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) 3322 { 3323 vsi->info.mapping_flags = ctx->info.mapping_flags; 3324 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, 3325 sizeof(vsi->info.q_mapping)); 3326 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, 3327 sizeof(vsi->info.tc_mapping)); 3328 } 3329 3330 /** 3331 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map 3332 * @vsi: VSI to be configured 3333 * @ena_tc: TC bitmap 3334 * 3335 * VSI queues are expected to be quiesced before calling this function 3336 */ 3337 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) 3338 { 3339 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 3340 struct ice_pf *pf = vsi->back; 3341 struct ice_vsi_ctx *ctx; 3342 enum ice_status status; 3343 struct device *dev; 3344 int i, ret = 0; 3345 u8 num_tc = 0; 3346 3347 dev = ice_pf_to_dev(pf); 3348 3349 ice_for_each_traffic_class(i) { 3350 /* build bitmap of enabled TCs */ 3351 if (ena_tc & BIT(i)) 3352 num_tc++; 3353 /* populate max_txqs per TC */ 3354 max_txqs[i] = vsi->alloc_txq; 3355 } 3356 3357 vsi->tc_cfg.ena_tc = ena_tc; 3358 vsi->tc_cfg.numtc = num_tc; 3359 3360 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 3361 if (!ctx) 3362 return -ENOMEM; 3363 3364 ctx->vf_num = 0; 3365 ctx->info = vsi->info; 3366 3367 ice_vsi_setup_q_map(vsi, ctx); 3368 3369 /* must indicate which sections of the VSI context are being modified */ 3370 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); 3371 status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); 3372 if (status) { 3373 dev_info(dev, "Failed VSI Update\n"); 3374 ret = -EIO; 3375 goto out; 3376 } 3377 3378 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 3379 max_txqs); 3380 3381 if (status) { 3382 dev_err(dev, "VSI %d failed TC config, error %s\n", 3383 vsi->vsi_num, ice_stat_str(status)); 3384 ret = -EIO; 3385 goto out; 3386 } 3387 ice_vsi_update_q_map(vsi, ctx); 3388 vsi->info.valid_sections = 0; 3389 3390 ice_vsi_cfg_netdev_tc(vsi, ena_tc); 3391 out: 3392 kfree(ctx); 3393 return ret; 3394 } 3395 #endif /* CONFIG_DCB */ 3396 3397 /** 3398 * ice_update_ring_stats - Update ring statistics 3399 * @ring: ring to update 3400 * @pkts: number of processed packets 3401 * @bytes: number of processed bytes 3402 * 3403 * This function assumes that the caller has acquired a u64_stats_sync lock.
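 * (See ice_update_tx_ring_stats() and ice_update_rx_ring_stats() below, which
 * bracket the call with u64_stats_update_begin()/u64_stats_update_end().)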
3404 */ 3405 static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes) 3406 { 3407 ring->stats.bytes += bytes; 3408 ring->stats.pkts += pkts; 3409 } 3410 3411 /** 3412 * ice_update_tx_ring_stats - Update Tx ring specific counters 3413 * @tx_ring: ring to update 3414 * @pkts: number of processed packets 3415 * @bytes: number of processed bytes 3416 */ 3417 void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes) 3418 { 3419 u64_stats_update_begin(&tx_ring->syncp); 3420 ice_update_ring_stats(tx_ring, pkts, bytes); 3421 u64_stats_update_end(&tx_ring->syncp); 3422 } 3423 3424 /** 3425 * ice_update_rx_ring_stats - Update Rx ring specific counters 3426 * @rx_ring: ring to update 3427 * @pkts: number of processed packets 3428 * @bytes: number of processed bytes 3429 */ 3430 void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes) 3431 { 3432 u64_stats_update_begin(&rx_ring->syncp); 3433 ice_update_ring_stats(rx_ring, pkts, bytes); 3434 u64_stats_update_end(&rx_ring->syncp); 3435 } 3436 3437 /** 3438 * ice_status_to_errno - convert from enum ice_status to Linux errno 3439 * @err: ice_status value to convert 3440 */ 3441 int ice_status_to_errno(enum ice_status err) 3442 { 3443 switch (err) { 3444 case ICE_SUCCESS: 3445 return 0; 3446 case ICE_ERR_DOES_NOT_EXIST: 3447 return -ENOENT; 3448 case ICE_ERR_OUT_OF_RANGE: 3449 case ICE_ERR_AQ_ERROR: 3450 case ICE_ERR_AQ_TIMEOUT: 3451 case ICE_ERR_AQ_EMPTY: 3452 case ICE_ERR_AQ_FW_CRITICAL: 3453 return -EIO; 3454 case ICE_ERR_PARAM: 3455 case ICE_ERR_INVAL_SIZE: 3456 return -EINVAL; 3457 case ICE_ERR_NO_MEMORY: 3458 return -ENOMEM; 3459 case ICE_ERR_MAX_LIMIT: 3460 return -EAGAIN; 3461 case ICE_ERR_RESET_ONGOING: 3462 return -EBUSY; 3463 case ICE_ERR_AQ_FULL: 3464 return -ENOSPC; 3465 default: 3466 return -EINVAL; 3467 } 3468 } 3469 3470 /** 3471 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used 3472 * @sw: switch to check if its default forwarding VSI is free 3473 * 3474 * Returns true if the default forwarding VSI is already being used, else false, 3475 * signalling that it's available to use. 3476 */ 3477 bool ice_is_dflt_vsi_in_use(struct ice_sw *sw) 3478 { 3479 return (sw->dflt_vsi && sw->dflt_vsi_ena); 3480 } 3481 3482 /** 3483 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI 3484 * @sw: switch for the default forwarding VSI to compare against 3485 * @vsi: VSI to compare against default forwarding VSI 3486 * 3487 * If the VSI passed in is the default forwarding VSI then return true, else 3488 * return false 3489 */ 3490 bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) 3491 { 3492 return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena); 3493 } 3494 3495 /** 3496 * ice_set_dflt_vsi - set the default forwarding VSI 3497 * @sw: switch used to assign the default forwarding VSI 3498 * @vsi: VSI getting set as the default forwarding VSI on the switch 3499 * 3500 * If the VSI passed in is already the default VSI and it's enabled just return 3501 * success. 3502 * 3503 * If there is already a default VSI on the switch and it's enabled then return 3504 * -EEXIST since there can only be one default VSI per switch. 3505 * 3506 * Otherwise try to set the VSI passed in as the switch's default VSI and 3507 * return the result.
3508 */ 3509 int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) 3510 { 3511 enum ice_status status; 3512 struct device *dev; 3513 3514 if (!sw || !vsi) 3515 return -EINVAL; 3516 3517 dev = ice_pf_to_dev(vsi->back); 3518 3519 /* the VSI passed in is already the default VSI */ 3520 if (ice_is_vsi_dflt_vsi(sw, vsi)) { 3521 dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n", 3522 vsi->vsi_num); 3523 return 0; 3524 } 3525 3526 /* another VSI is already the default VSI for this switch */ 3527 if (ice_is_dflt_vsi_in_use(sw)) { 3528 dev_err(dev, "Default forwarding VSI %d already in use, disable it and try again\n", 3529 sw->dflt_vsi->vsi_num); 3530 return -EEXIST; 3531 } 3532 3533 status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX); 3534 if (status) { 3535 dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n", 3536 vsi->vsi_num, ice_stat_str(status)); 3537 return -EIO; 3538 } 3539 3540 sw->dflt_vsi = vsi; 3541 sw->dflt_vsi_ena = true; 3542 3543 return 0; 3544 } 3545 3546 /** 3547 * ice_clear_dflt_vsi - clear the default forwarding VSI 3548 * @sw: switch used to clear the default VSI 3549 * 3550 * If the switch has no default VSI or it's not enabled, return an error. 3551 * 3552 * Otherwise try to clear the default VSI and return the result. 3553 */ 3554 int ice_clear_dflt_vsi(struct ice_sw *sw) 3555 { 3556 struct ice_vsi *dflt_vsi; 3557 enum ice_status status; 3558 struct device *dev; 3559 3560 if (!sw) 3561 return -EINVAL; 3562 3563 dev = ice_pf_to_dev(sw->pf); 3564 3565 dflt_vsi = sw->dflt_vsi; 3566 3567 /* there is no default VSI configured */ 3568 if (!ice_is_dflt_vsi_in_use(sw)) 3569 return -ENODEV; 3570 3571 status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false, 3572 ICE_FLTR_RX); 3573 if (status) { 3574 dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n", 3575 dflt_vsi->vsi_num, ice_stat_str(status)); 3576 return -EIO; 3577 } 3578 3579 sw->dflt_vsi = NULL; 3580 sw->dflt_vsi_ena = false; 3581 3582 return 0; 3583 } 3584 3585 /** 3586 * ice_set_link - turn on/off physical link 3587 * @vsi: VSI to modify physical link on 3588 * @ena: turn on/off physical link 3589 */ 3590 int ice_set_link(struct ice_vsi *vsi, bool ena) 3591 { 3592 struct device *dev = ice_pf_to_dev(vsi->back); 3593 struct ice_port_info *pi = vsi->port_info; 3594 struct ice_hw *hw = pi->hw; 3595 enum ice_status status; 3596 3597 if (vsi->type != ICE_VSI_PF) 3598 return -EINVAL; 3599 3600 status = ice_aq_set_link_restart_an(pi, ena, NULL); 3601 3602 /* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE. 3603 * This is not a fatal error, so print a warning message and return 3604 * a success code. Return an error if FW returns an error code other 3605 * than ICE_AQ_RC_EMODE 3606 */ 3607 if (status == ICE_ERR_AQ_ERROR) { 3608 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3609 dev_warn(dev, "can't set link to %s, err %s aq_err %s. not fatal, continuing\n", 3610 (ena ? "ON" : "OFF"), ice_stat_str(status), 3611 ice_aq_str(hw->adminq.sq_last_status)); 3612 } else if (status) { 3613 dev_err(dev, "can't set link to %s, err %s aq_err %s\n", 3614 (ena ?
"ON" : "OFF"), ice_stat_str(status), 3615 ice_aq_str(hw->adminq.sq_last_status)); 3616 return -EIO; 3617 } 3618 3619 return 0; 3620 } 3621 3622 /** 3623 * ice_is_feature_supported 3624 * @pf: pointer to the struct ice_pf instance 3625 * @f: feature enum to be checked 3626 * 3627 * returns true if feature is supported, false otherwise 3628 */ 3629 bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f) 3630 { 3631 if (f < 0 || f >= ICE_F_MAX) 3632 return false; 3633 3634 return test_bit(f, pf->features); 3635 } 3636 3637 /** 3638 * ice_set_feature_support 3639 * @pf: pointer to the struct ice_pf instance 3640 * @f: feature enum to set 3641 */ 3642 static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f) 3643 { 3644 if (f < 0 || f >= ICE_F_MAX) 3645 return; 3646 3647 set_bit(f, pf->features); 3648 } 3649 3650 /** 3651 * ice_clear_feature_support 3652 * @pf: pointer to the struct ice_pf instance 3653 * @f: feature enum to clear 3654 */ 3655 void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f) 3656 { 3657 if (f < 0 || f >= ICE_F_MAX) 3658 return; 3659 3660 clear_bit(f, pf->features); 3661 } 3662 3663 /** 3664 * ice_init_feature_support 3665 * @pf: pointer to the struct ice_pf instance 3666 * 3667 * called during init to setup supported feature 3668 */ 3669 void ice_init_feature_support(struct ice_pf *pf) 3670 { 3671 switch (pf->hw.device_id) { 3672 case ICE_DEV_ID_E810C_BACKPLANE: 3673 case ICE_DEV_ID_E810C_QSFP: 3674 case ICE_DEV_ID_E810C_SFP: 3675 ice_set_feature_support(pf, ICE_F_DSCP); 3676 if (ice_is_e810t(&pf->hw)) 3677 ice_set_feature_support(pf, ICE_F_SMA_CTRL); 3678 break; 3679 default: 3680 break; 3681 } 3682 } 3683 3684 /** 3685 * ice_vsi_update_security - update security block in VSI 3686 * @vsi: pointer to VSI structure 3687 * @fill: function pointer to fill ctx 3688 */ 3689 int 3690 ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *)) 3691 { 3692 struct ice_vsi_ctx ctx = { 0 }; 3693 3694 ctx.info = vsi->info; 3695 ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); 3696 fill(&ctx); 3697 3698 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) 3699 return -ENODEV; 3700 3701 vsi->info = ctx.info; 3702 return 0; 3703 } 3704 3705 /** 3706 * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx 3707 * @ctx: pointer to VSI ctx structure 3708 */ 3709 void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx) 3710 { 3711 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF | 3712 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 3713 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 3714 } 3715 3716 /** 3717 * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx 3718 * @ctx: pointer to VSI ctx structure 3719 */ 3720 void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx) 3721 { 3722 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF & 3723 ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 3724 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 3725 } 3726 3727 /** 3728 * ice_vsi_ctx_set_allow_override - allow destination override on VSI 3729 * @ctx: pointer to VSI ctx structure 3730 */ 3731 void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx) 3732 { 3733 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; 3734 } 3735 3736 /** 3737 * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI 3738 * @ctx: pointer to VSI ctx structure 3739 */ 3740 void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx) 3741 { 3742 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; 
3743 } 3744