// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_devlink.h"
#include "ice_vsi_vlan_ops.h"

/**
 * ice_vsi_type_str - maps VSI type enum to string equivalents
 * @vsi_type: VSI type enum
 */
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
{
	switch (vsi_type) {
	case ICE_VSI_PF:
		return "ICE_VSI_PF";
	case ICE_VSI_VF:
		return "ICE_VSI_VF";
	case ICE_VSI_CTRL:
		return "ICE_VSI_CTRL";
	case ICE_VSI_CHNL:
		return "ICE_VSI_CHNL";
	case ICE_VSI_LB:
		return "ICE_VSI_LB";
	case ICE_VSI_SWITCHDEV_CTRL:
		return "ICE_VSI_SWITCHDEV_CTRL";
	default:
		return "unknown";
	}
}

/**
 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 *
 * First enable/disable all of the Rx rings, flush any remaining writes, and
 * then verify that they have all been enabled/disabled successfully. This
 * lets all of the register writes complete when enabling/disabling the Rx
 * rings before waiting for the change in hardware to complete.
 */
static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
	int ret = 0;
	u16 i;

	ice_for_each_rxq(vsi, i)
		ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);

	ice_flush(&vsi->back->hw);

	ice_for_each_rxq(vsi, i) {
		ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
		if (ret)
			break;
	}

	return ret;
}
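/* Illustration only (not part of the driver): callers rely on the
 * write-then-wait split above roughly like this:
 *
 *	err = ice_vsi_ctrl_all_rx_rings(vsi, true);
 *	if (err)
 *		...at least one ring did not reach the requested state...
 *
 * The per-ring control writes are all posted first and flushed once, so
 * the waits in the second loop overlap instead of each ring paying its
 * own write + wait latency.
 */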
/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (vsi->type == ICE_VSI_CHNL)
		return 0;

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
				     sizeof(*vsi->tx_rings), GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;

	vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
				     sizeof(*vsi->rx_rings), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rings;

	/* txq_map needs to have enough space to track both Tx (stack) rings
	 * and XDP rings; at this point vsi->num_xdp_txq might not be set,
	 * so use num_possible_cpus() as we always want to provide one XDP
	 * ring per CPU, regardless of the queue count the user may have
	 * configured via ethtool's set_channels() callback.
	 */
	vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
				    sizeof(*vsi->txq_map), GFP_KERNEL);

	if (!vsi->txq_map)
		goto err_txq_map;

	vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
				    sizeof(*vsi->rxq_map), GFP_KERNEL);
	if (!vsi->rxq_map)
		goto err_rxq_map;

	/* There is no need to allocate q_vectors for a loopback VSI. */
	if (vsi->type == ICE_VSI_LB)
		return 0;

	/* allocate memory for q_vector pointers */
	vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
				      sizeof(*vsi->q_vectors), GFP_KERNEL);
	if (!vsi->q_vectors)
		goto err_vectors;

	vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
	if (!vsi->af_xdp_zc_qps)
		goto err_zc_qps;

	return 0;

err_zc_qps:
	devm_kfree(dev, vsi->q_vectors);
err_vectors:
	devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
	devm_kfree(dev, vsi->txq_map);
err_txq_map:
	devm_kfree(dev, vsi->rx_rings);
err_rings:
	devm_kfree(dev, vsi->tx_rings);
	return -ENOMEM;
}

/**
 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
	switch (vsi->type) {
	case ICE_VSI_PF:
	case ICE_VSI_SWITCHDEV_CTRL:
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
		/* a user could change the values of num_[tr]x_desc using
		 * ethtool -G so we should keep those values instead of
		 * overwriting them with the defaults.
		 */
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
		break;
	default:
		dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
			vsi->type);
		break;
	}
}
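/* Example of the ethtool interaction noted in ice_vsi_set_num_desc()
 * (hypothetical values): after "ethtool -G <iface> rx 2048 tx 1024" the
 * num_rx_desc/num_tx_desc fields are non-zero and therefore preserved,
 * while a freshly allocated VSI with both counts zeroed picks up
 * ICE_DFLT_NUM_RX_DESC and ICE_DFLT_NUM_TX_DESC instead.
 */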
/**
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
{
	enum ice_vsi_type vsi_type = vsi->type;
	struct ice_pf *pf = vsi->back;
	struct ice_vf *vf = vsi->vf;

	if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
		return;

	switch (vsi_type) {
	case ICE_VSI_PF:
		if (vsi->req_txq) {
			vsi->alloc_txq = vsi->req_txq;
			vsi->num_txq = vsi->req_txq;
		} else {
			vsi->alloc_txq = min3(pf->num_lan_msix,
					      ice_get_avail_txq_count(pf),
					      (u16)num_online_cpus());
		}

		pf->num_lan_tx = vsi->alloc_txq;

		/* only 1 Rx queue unless RSS is enabled */
		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			vsi->alloc_rxq = 1;
		} else {
			if (vsi->req_rxq) {
				vsi->alloc_rxq = vsi->req_rxq;
				vsi->num_rxq = vsi->req_rxq;
			} else {
				vsi->alloc_rxq = min3(pf->num_lan_msix,
						      ice_get_avail_rxq_count(pf),
						      (u16)num_online_cpus());
			}
		}

		pf->num_lan_rx = vsi->alloc_rxq;

		vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
					   max_t(int, vsi->alloc_rxq,
						 vsi->alloc_txq));
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
		/* The number of queues for the ctrl VSI is equal to the
		 * number of VFs. Each ring is associated with the
		 * corresponding VF_PR netdev.
		 */
		vsi->alloc_txq = ice_get_num_vfs(pf);
		vsi->alloc_rxq = vsi->alloc_txq;
		vsi->num_q_vectors = 1;
		break;
	case ICE_VSI_VF:
		if (vf->num_req_qs)
			vf->num_vf_qs = vf->num_req_qs;
		vsi->alloc_txq = vf->num_vf_qs;
		vsi->alloc_rxq = vf->num_vf_qs;
		/* pf->vfs.num_msix_per includes (VF miscellaneous vector +
		 * data queue interrupts). Since vsi->num_q_vectors is the
		 * number of queue vectors, subtract 1 (ICE_NONQ_VECS_VF)
		 * from the original vector count.
		 */
		vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF;
		break;
	case ICE_VSI_CTRL:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		vsi->num_q_vectors = 1;
		break;
	case ICE_VSI_CHNL:
		vsi->alloc_txq = 0;
		vsi->alloc_rxq = 0;
		break;
	case ICE_VSI_LB:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		break;
	default:
		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
		break;
	}

	ice_vsi_set_num_desc(vsi);
}

/**
 * ice_get_free_slot - get the next free (NULL) location index in array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use
 * this function on any array of pointers.
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}

/**
 * ice_vsi_delete_from_hw - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx *ctxt;
	int status;

	ice_fltr_remove_all(vsi);
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return;

	if (vsi->type == ICE_VSI_VF)
		ctxt->vf_num = vsi->vf->vf_id;
	ctxt->vsi_num = vsi->vsi_num;

	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));

	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
	if (status)
		dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
			vsi->vsi_num, status);

	kfree(ctxt);
}

/**
 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
 * @vsi: pointer to VSI being cleared
 */
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	bitmap_free(vsi->af_xdp_zc_qps);
	vsi->af_xdp_zc_qps = NULL;
	/* free the ring and vector containers */
	devm_kfree(dev, vsi->q_vectors);
	vsi->q_vectors = NULL;
	devm_kfree(dev, vsi->tx_rings);
	vsi->tx_rings = NULL;
	devm_kfree(dev, vsi->rx_rings);
	vsi->rx_rings = NULL;
	devm_kfree(dev, vsi->txq_map);
	vsi->txq_map = NULL;
	devm_kfree(dev, vsi->rxq_map);
	vsi->rxq_map = NULL;
}
/**
 * ice_vsi_free_stats - Free the ring statistics structures
 * @vsi: VSI pointer
 */
static void ice_vsi_free_stats(struct ice_vsi *vsi)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf = vsi->back;
	int i;

	if (vsi->type == ICE_VSI_CHNL)
		return;
	if (!pf->vsi_stats)
		return;

	vsi_stat = pf->vsi_stats[vsi->idx];
	if (!vsi_stat)
		return;

	ice_for_each_alloc_txq(vsi, i) {
		if (vsi_stat->tx_ring_stats[i]) {
			kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
			WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
		}
	}

	ice_for_each_alloc_rxq(vsi, i) {
		if (vsi_stat->rx_ring_stats[i]) {
			kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
			WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
		}
	}

	kfree(vsi_stat->tx_ring_stats);
	kfree(vsi_stat->rx_ring_stats);
	kfree(vsi_stat);
	pf->vsi_stats[vsi->idx] = NULL;
}

/**
 * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
 * @vsi: VSI which is having stats allocated
 */
static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
{
	struct ice_ring_stats **tx_ring_stats;
	struct ice_ring_stats **rx_ring_stats;
	struct ice_vsi_stats *vsi_stats;
	struct ice_pf *pf = vsi->back;
	u16 i;

	vsi_stats = pf->vsi_stats[vsi->idx];
	tx_ring_stats = vsi_stats->tx_ring_stats;
	rx_ring_stats = vsi_stats->rx_ring_stats;

	/* Allocate Tx ring stats */
	ice_for_each_alloc_txq(vsi, i) {
		struct ice_ring_stats *ring_stats;
		struct ice_tx_ring *ring;

		ring = vsi->tx_rings[i];
		ring_stats = tx_ring_stats[i];

		if (!ring_stats) {
			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
			if (!ring_stats)
				goto err_out;

			WRITE_ONCE(tx_ring_stats[i], ring_stats);
		}

		ring->ring_stats = ring_stats;
	}

	/* Allocate Rx ring stats */
	ice_for_each_alloc_rxq(vsi, i) {
		struct ice_ring_stats *ring_stats;
		struct ice_rx_ring *ring;

		ring = vsi->rx_rings[i];
		ring_stats = rx_ring_stats[i];

		if (!ring_stats) {
			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
			if (!ring_stats)
				goto err_out;

			WRITE_ONCE(rx_ring_stats[i], ring_stats);
		}

		ring->ring_stats = ring_stats;
	}

	return 0;

err_out:
	ice_vsi_free_stats(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_free - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
 */
static void ice_vsi_free(struct ice_vsi *vsi)
{
	struct ice_pf *pf = NULL;
	struct device *dev;

	if (!vsi || !vsi->back)
		return;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
		return;
	}

	mutex_lock(&pf->sw_mutex);
	/* updates the PF for this cleared VSI */

	pf->vsi[vsi->idx] = NULL;
	pf->next_vsi = vsi->idx;

	ice_vsi_free_stats(vsi);
	ice_vsi_free_arrays(vsi);
	mutex_unlock(&pf->sw_mutex);
	devm_kfree(dev, vsi);
}

void ice_vsi_delete(struct ice_vsi *vsi)
{
	ice_vsi_delete_from_hw(vsi);
	ice_vsi_free(vsi);
}

/**
 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.tx_ring)
		return IRQ_HANDLED;

#define FDIR_RX_DESC_CLEAN_BUDGET 64
	ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
	ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);

	return IRQ_HANDLED;
}
/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
		return IRQ_HANDLED;

	q_vector->total_events++;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
	struct ice_pf *pf = q_vector->vsi->back;
	struct ice_vf *vf;
	unsigned int bkt;

	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
		return IRQ_HANDLED;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		napi_schedule(&vf->repr->q_vector->napi);
	rcu_read_unlock();

	return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
 * @vsi: VSI pointer
 */
static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf = vsi->back;

	if (vsi->type == ICE_VSI_CHNL)
		return 0;
	if (!pf->vsi_stats)
		return -ENOENT;

	if (pf->vsi_stats[vsi->idx])
		/* realloc will happen in rebuild path */
		return 0;

	vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
	if (!vsi_stat)
		return -ENOMEM;

	vsi_stat->tx_ring_stats =
		kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
			GFP_KERNEL);
	if (!vsi_stat->tx_ring_stats)
		goto err_alloc_tx;

	vsi_stat->rx_ring_stats =
		kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
			GFP_KERNEL);
	if (!vsi_stat->rx_ring_stats)
		goto err_alloc_rx;

	pf->vsi_stats[vsi->idx] = vsi_stat;

	return 0;

err_alloc_rx:
	kfree(vsi_stat->rx_ring_stats);
err_alloc_tx:
	kfree(vsi_stat->tx_ring_stats);
	kfree(vsi_stat);
	pf->vsi_stats[vsi->idx] = NULL;
	return -ENOMEM;
}

/**
 * ice_vsi_alloc_def - set default values for an already allocated VSI
 * @vsi: ptr to VSI
 * @ch: ptr to channel
 */
static int
ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
{
	if (vsi->type != ICE_VSI_CHNL) {
		ice_vsi_set_num_qs(vsi);
		if (ice_vsi_alloc_arrays(vsi))
			return -ENOMEM;
	}

	switch (vsi->type) {
	case ICE_VSI_SWITCHDEV_CTRL:
		/* Setup eswitch MSIX irq handler for VSI */
		vsi->irq_handler = ice_eswitch_msix_clean_rings;
		break;
	case ICE_VSI_PF:
		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	case ICE_VSI_CTRL:
		/* Setup ctrl VSI MSIX irq handler */
		vsi->irq_handler = ice_msix_clean_ctrl_vsi;
		break;
	case ICE_VSI_CHNL:
		if (!ch)
			return -EINVAL;

		vsi->num_rxq = ch->num_rxq;
		vsi->num_txq = ch->num_txq;
		vsi->next_base_q = ch->base_q;
		break;
	case ICE_VSI_VF:
	case ICE_VSI_LB:
		break;
	default:
		ice_vsi_free_arrays(vsi);
		return -EINVAL;
	}

	return 0;
}
/**
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 *
 * Reserves a VSI index from the PF and allocates an empty VSI structure
 * without a type. The VSI structure must later be initialized by calling
 * ice_vsi_cfg().
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi = NULL;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->sw_mutex);

	/* If we have already allocated our maximum number of VSIs,
	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	 * is available to be populated
	 */
	if (pf->next_vsi == ICE_NO_VSI) {
		dev_dbg(dev, "out of VSI slots!\n");
		goto unlock_pf;
	}

	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
	if (!vsi)
		goto unlock_pf;

	vsi->back = pf;
	set_bit(ICE_VSI_DOWN, vsi->state);

	/* fill slot and make note of the index */
	vsi->idx = pf->next_vsi;
	pf->vsi[pf->next_vsi] = vsi;

	/* prepare pf->next_vsi for next use */
	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
					 pf->next_vsi);

unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;
}
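/* Sketch of the intended call sequence (illustrative, simplified from the
 * actual callers):
 *
 *	vsi = ice_vsi_alloc(pf);
 *	if (!vsi)
 *		return NULL;
 *	...fill in the VSI type and configuration parameters...
 *	if (ice_vsi_cfg(vsi, ...))
 *		ice_vsi_free(vsi);
 *
 * ice_vsi_alloc() only reserves the pf->vsi[] slot under sw_mutex; all
 * type-specific queue, vector and context setup is deferred to
 * ice_vsi_cfg().
 */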
/**
 * ice_alloc_fd_res - Allocate FD resource for a VSI
 * @vsi: pointer to the ice_vsi
 *
 * This allocates the FD resources
 *
 * Returns 0 on success, -EPERM when FD resources cannot be assigned to this
 * VSI, or -EINVAL on an invalid configuration
 */
static int ice_alloc_fd_res(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u32 g_val, b_val;

	/* Flow Director filters are only allocated/assigned to the PF VSI or
	 * CHNL VSI which passes the traffic. The CTRL VSI is only used to
	 * add/delete filters so resources are not allocated to it
	 */
	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EPERM;

	if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
	      vsi->type == ICE_VSI_CHNL))
		return -EPERM;

	/* FD filters from guaranteed pool per VSI */
	g_val = pf->hw.func_caps.fd_fltr_guar;
	if (!g_val)
		return -EPERM;

	/* FD filters from best effort pool */
	b_val = pf->hw.func_caps.fd_fltr_best_effort;
	if (!b_val)
		return -EPERM;

	/* PF main VSI gets only 64 FD resources from guaranteed pool
	 * when ADQ is configured.
	 */
#define ICE_PF_VSI_GFLTR	64

	/* determine FD filter resources per VSI from shared(best effort) and
	 * dedicated pool
	 */
	if (vsi->type == ICE_VSI_PF) {
		vsi->num_gfltr = g_val;
		/* if MQPRIO is configured, main VSI doesn't get all FD
		 * resources from guaranteed pool. PF VSI gets 64 FD resources
		 */
		if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
			if (g_val < ICE_PF_VSI_GFLTR)
				return -EPERM;
			/* allow bare minimum entries for PF VSI */
			vsi->num_gfltr = ICE_PF_VSI_GFLTR;
		}

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	} else if (vsi->type == ICE_VSI_VF) {
		vsi->num_gfltr = 0;

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	} else {
		struct ice_vsi *main_vsi;
		int numtc;

		main_vsi = ice_get_main_vsi(pf);
		if (!main_vsi)
			return -EPERM;

		if (!main_vsi->all_numtc)
			return -EINVAL;

		/* figure out ADQ numtc */
		numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;

		/* only one TC but still asking resources for channels,
		 * invalid config
		 */
		if (numtc < ICE_CHNL_START_TC)
			return -EPERM;

		g_val -= ICE_PF_VSI_GFLTR;
		/* channel VSIs get an equal share from the guaranteed pool */
		vsi->num_gfltr = g_val / numtc;

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	}

	return 0;
}

/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	struct ice_qs_cfg rx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_rxqs,
		.pf_map_size = pf->max_pf_rxqs,
		.q_count = vsi->alloc_rxq,
		.scatter_count = ICE_MAX_SCATTER_RXQS,
		.vsi_map = vsi->rxq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	int ret;

	if (vsi->type == ICE_VSI_CHNL)
		return 0;

	ret = __ice_vsi_get_qs(&tx_qs_cfg);
	if (ret)
		return ret;
	vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;

	ret = __ice_vsi_get_qs(&rx_qs_cfg);
	if (ret)
		return ret;
	vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;

	return 0;
}

/**
 * ice_vsi_put_qs - Release queues from VSI to PF
 * @vsi: the VSI that is going to release queues
 */
static void ice_vsi_put_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	mutex_lock(&pf->avail_q_mutex);

	ice_for_each_alloc_txq(vsi, i) {
		clear_bit(vsi->txq_map[i], pf->avail_txqs);
		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
	}

	ice_for_each_alloc_rxq(vsi, i) {
		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
	}

	mutex_unlock(&pf->avail_q_mutex);
}

/**
 * ice_is_safe_mode - check if the driver is in safe mode
 * @pf: pointer to the PF struct
 *
 * returns true if driver is in safe mode, false otherwise
 */
bool ice_is_safe_mode(struct ice_pf *pf)
{
	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}

/**
 * ice_is_rdma_ena - check if RDMA is supported
 * @pf: pointer to the PF struct
 *
 * returns true if RDMA is currently supported, false otherwise
 */
bool ice_is_rdma_ena(struct ice_pf *pf)
{
	return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
/**
 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
 * @vsi: the VSI being cleaned up
 *
 * This function deletes the RSS input sets for all flows that were
 * configured for this VSI
 */
static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int status;

	if (ice_is_safe_mode(pf))
		return;

	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
			vsi->vsi_num, status);
}

/**
 * ice_rss_clean - Delete RSS related VSI structures and configuration
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	devm_kfree(dev, vsi->rss_hkey_user);
	devm_kfree(dev, vsi->rss_lut_user);

	ice_vsi_clean_rss_flow_fld(vsi);
	/* remove RSS replay list */
	if (!ice_is_safe_mode(pf))
		ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;
	u16 max_rss_size;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	max_rss_size = BIT(cap->rss_table_entry_width);
	switch (vsi->type) {
	case ICE_VSI_CHNL:
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = (u16)cap->rss_table_size;
		if (vsi->type == ICE_VSI_CHNL)
			vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
		else
			vsi->rss_size = min_t(u16, num_online_cpus(),
					      max_rss_size);
		vsi->rss_lut_type = ICE_LUT_PF;
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
		vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
		vsi->rss_lut_type = ICE_LUT_VSI;
		break;
	case ICE_VSI_VF:
		/* VF VSI will get a small RSS table.
		 * For VSI_LUT, LUT size should be set to 64 bytes.
		 */
		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
		vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
		vsi->rss_lut_type = ICE_LUT_VSI;
		break;
	case ICE_VSI_LB:
		break;
	default:
		dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		break;
	}
}
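/* Worked example for the sizing above (illustrative numbers): with
 * rss_table_entry_width = 9, max_rss_size = BIT(9) = 512; a PF VSI on a
 * 16-CPU host then gets rss_size = min(16, 512) = 16, while its LUT span
 * stays at the PF-wide rss_table_size reported in func_caps.
 */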
/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @hw: HW structure used to determine the VLAN mode of the device
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSIs should be allocated from the shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* allow all untagged/tagged packets by default on Tx */
	ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL &
					ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >>
				       ICE_AQ_VSI_INNER_VLAN_TX_MODE_S);
	/* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
	 * results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
	 *
	 * DVM - leave inner VLAN in packet by default
	 */
	if (ice_is_dvm_ena(hw)) {
		ctxt->info.inner_vlan_flags |=
			ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
		ctxt->info.outer_vlan_flags =
			(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
			 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
			ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
		ctxt->info.outer_vlan_flags |=
			(ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
			 ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
			ICE_AQ_VSI_OUTER_TAG_TYPE_M;
		ctxt->info.outer_vlan_flags |=
			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
				   ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
	}
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No outer tag support, so outer_tag_flags remains zero */
}
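/* The identity UP table built above packs all eight per-UP
 * ICE_UP_TABLE_TRANSLATE() fields into one u32, so user priority N maps
 * back to N for both ingress and egress. For illustration, a hypothetical
 * remap of UP 1 onto UP 0 would use ICE_UP_TABLE_TRANSLATE(0, 1) for that
 * slot and leave the other entries identity-mapped.
 */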
/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
	u16 num_txq_per_tc, num_rxq_per_tc;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	u8 netdev_tc = 0;
	int i;

	if (!vsi->tc_cfg.numtc) {
		/* at least TC0 should be enabled by default */
		vsi->tc_cfg.numtc = 1;
		vsi->tc_cfg.ena_tc = 1;
	}

	num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
	if (!num_rxq_per_tc)
		num_rxq_per_tc = 1;
	num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
	if (!num_txq_per_tc)
		num_txq_per_tc = 1;

	/* find the (rounded up) power-of-2 of qcount */
	pow = (u16)order_base_2(num_rxq_per_tc);

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are for the queue offset for TC0, the next 4 bits
	 * for the number of queues allocated to TC0. The number of queues is
	 * a power-of-2.
	 *
	 * If a TC is not enabled, its queue offset is set to 0 and one queue
	 * is allocated; this way, traffic for the given TC will be sent to
	 * the default queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */
	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
		vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			 ICE_AQ_VSI_TC_Q_NUM_M);
		offset += num_rxq_per_tc;
		tx_count += num_txq_per_tc;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* if offset is non-zero, it was calculated correctly from the
	 * enabled TCs of this VSI; otherwise fall back to num_rxq_per_tc,
	 * which is always correct and non-zero because it is based on the
	 * VSI's allocated Rx queue count, which is at least 1 (hence
	 * tx_count will be at least 1 as well)
	 */
	if (offset)
		rx_count = offset;
	else
		rx_count = num_rxq_per_tc;

	if (rx_count > vsi->alloc_rxq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
			rx_count, vsi->alloc_rxq);
		return -EINVAL;
	}

	if (tx_count > vsi->alloc_txq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
			tx_count, vsi->alloc_txq);
		return -EINVAL;
	}

	vsi->num_txq = tx_count;
	vsi->num_rxq = rx_count;

	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
		dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
		/* since there is a chance that num_rxq could have been changed
		 * in the above for loop, make num_txq equal to num_rxq.
		 */
		vsi->num_txq = vsi->num_rxq;
	}

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);

	return 0;
}
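/* Worked example of the TC queue-map encoding above: 8 Rx queues spread
 * across 2 enabled TCs gives num_rxq_per_tc = 4 and
 * pow = order_base_2(4) = 2, so TC0 encodes {offset 0, 2^2 queues} and
 * TC1 encodes {offset 4, 2^2 queues}, i.e. Rx queues 0-3 and 4-7 in the
 * VSI's queue space.
 */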
/**
 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 dflt_q_group, dflt_q_prio;
	u16 dflt_q, report_q, val;

	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
	    vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)
		return;

	val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
	ctxt->info.valid_sections |= cpu_to_le16(val);
	dflt_q = 0;
	dflt_q_group = 0;
	report_q = 0;
	dflt_q_prio = 0;

	/* enable flow director filtering/programming */
	val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
	ctxt->info.fd_options = cpu_to_le16(val);
	/* max of allocated flow director filters */
	ctxt->info.max_fd_fltr_dedicated =
		cpu_to_le16(vsi->num_gfltr);
	/* max of shared flow director filters any VSI may program */
	ctxt->info.max_fd_fltr_shared =
		cpu_to_le16(vsi->num_bfltr);
	/* default queue index within the VSI of the default FD */
	val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) &
	       ICE_AQ_VSI_FD_DEF_Q_M);
	/* target queue or queue group to the FD filter */
	val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) &
		ICE_AQ_VSI_FD_DEF_GRP_M);
	ctxt->info.fd_def_q = cpu_to_le16(val);
	/* queue index on which FD filter completion is reported */
	val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) &
	       ICE_AQ_VSI_FD_REPORT_Q_M);
	/* priority of the default qindex action */
	val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) &
		ICE_AQ_VSI_FD_DEF_PRIORITY_M);
	ctxt->info.fd_report_opt = cpu_to_le16(val);
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;
	struct device *dev;
	struct ice_pf *pf;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	switch (vsi->type) {
	case ICE_VSI_CHNL:
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_VF:
		/* VF VSI gets a small RSS table, which is of VSI LUT type */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	default:
		dev_dbg(dev, "Unsupported VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		return;
	}

	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
			       ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
				ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}
static void
ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	struct ice_pf *pf = vsi->back;
	u16 qcount, qmap;
	u8 offset = 0;
	int pow;

	qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);

	pow = order_base_2(qcount);
	qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
		ICE_AQ_VSI_TC_Q_OFFSET_M) |
		((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
		 ICE_AQ_VSI_TC_Q_NUM_M);

	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
	ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
}

/**
 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
 * @vsi: VSI to check whether or not VLAN pruning is enabled.
 *
 * returns true if Rx VLAN pruning is enabled and false otherwise.
 */
static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
{
	return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
}
/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 * @vsi_flags: VSI configuration flags
 *
 * Set ICE_VSI_FLAG_INIT to initialize a new VSI context, clear it to
 * reconfigure an existing context.
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi_ctx *ctxt;
	struct device *dev;
	int ret = 0;

	dev = ice_pf_to_dev(pf);
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	switch (vsi->type) {
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
	case ICE_VSI_PF:
		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
	case ICE_VSI_CHNL:
		ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
		break;
	case ICE_VSI_VF:
		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
		/* VF number here is the absolute VF number (0-255) */
		ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
		break;
	default:
		ret = -ENODEV;
		goto out;
	}

	/* Handle VLAN pruning for channel VSI if main VSI has VLAN
	 * prune enabled
	 */
	if (vsi->type == ICE_VSI_CHNL) {
		struct ice_vsi *main_vsi;

		main_vsi = ice_get_main_vsi(pf);
		if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi))
			ctxt->info.sw_flags2 |=
				ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
		else
			ctxt->info.sw_flags2 &=
				~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

	ice_set_dflt_vsi_ctx(hw, ctxt);
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
		ice_set_fd_vsi_ctx(ctxt, vsi);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
	    vsi->type != ICE_VSI_CTRL) {
		ice_set_rss_vsi_ctx(ctxt, vsi);
		/* if updating the VSI context, set valid_sections to
		 * indicate which sections of the VSI context are being
		 * updated
		 */
		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
			ctxt->info.valid_sections |=
				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
	}

	ctxt->info.sw_id = vsi->port_info->sw_id;
	if (vsi->type == ICE_VSI_CHNL) {
		ice_chnl_vsi_setup_q_map(vsi, ctxt);
	} else {
		ret = ice_vsi_setup_q_map(vsi, ctxt);
		if (ret)
			goto out;

		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
			/* the VSI is being updated, so we must indicate
			 * which sections of the VSI context are being
			 * modified
			 */
			ctxt->info.valid_sections |=
				cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	}

	/* Allow control frames out of main VSI */
	if (vsi->type == ICE_VSI_PF) {
		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	}

	if (vsi_flags & ICE_VSI_FLAG_INIT) {
		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Add VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	} else {
		ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Update VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt->info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt->vsi_num;

out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	/* Avoid stale references by clearing map from vector to ring */
	if (vsi->q_vectors) {
		ice_for_each_q_vector(vsi, i) {
			struct ice_q_vector *q_vector = vsi->q_vectors[i];

			if (q_vector) {
				q_vector->tx.tx_ring = NULL;
				q_vector->rx.rx_ring = NULL;
			}
		}
	}

	if (vsi->tx_rings) {
		ice_for_each_alloc_txq(vsi, i) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				WRITE_ONCE(vsi->tx_rings[i], NULL);
			}
		}
	}
	if (vsi->rx_rings) {
		ice_for_each_alloc_rxq(vsi, i) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				WRITE_ONCE(vsi->rx_rings[i], NULL);
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u16 i;

	dev = ice_pf_to_dev(pf);
	/* Allocate Tx rings */
	ice_for_each_alloc_txq(vsi, i) {
		struct ice_tx_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);

		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->vsi = vsi;
		ring->tx_tstamps = &pf->ptp.port.tx;
		ring->dev = dev;
		ring->count = vsi->num_tx_desc;
		ring->txq_teid = ICE_INVAL_TEID;
		if (dvm_ena)
			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
		else
			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
		WRITE_ONCE(vsi->tx_rings[i], ring);
	}

	/* Allocate Rx rings */
	ice_for_each_alloc_rxq(vsi, i) {
		struct ice_rx_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = dev;
		ring->count = vsi->num_rx_desc;
		ring->cached_phctime = pf->ptp.cached_phc_time;
		WRITE_ONCE(vsi->rx_rings[i], ring);
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}
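/* Note on the kzalloc()/kfree_rcu() pairing above: ring pointers are
 * published with WRITE_ONCE() and may be walked by stats/datapath readers
 * outside the configuration locks, so ice_vsi_clear_rings() defers the
 * actual free until an RCU grace period has elapsed rather than calling
 * kfree() directly.
 */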
/**
 * ice_vsi_manage_rss_lut - disable/enable RSS
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 *
 * In the event of disable request for RSS, this function will zero out the
 * RSS LUT, while in the event of enable request for RSS, it will reconfigure
 * the RSS LUT.
 */
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
	u8 *lut;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return;

	if (ena) {
		if (vsi->rss_lut_user)
			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
		else
			ice_fill_rss_lut(lut, vsi->rss_table_size,
					 vsi->rss_size);
	}

	ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	kfree(lut);
}

/**
 * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
 * @vsi: VSI to be configured
 * @disable: set to true to have FCS / CRC in the frame data
 */
void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
{
	int i;

	ice_for_each_rxq(vsi, i)
		if (disable)
			vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
		else
			vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
}

/**
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
 * @vsi: VSI to be configured
 */
int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u8 *lut, *key;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
	    (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
	} else {
		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);

		/* If orig_rss_size is valid and it is less than the current
		 * main VSI's rss_size, update the main VSI's rss_size to be
		 * orig_rss_size so that when tc-qdisc is deleted, the main
		 * VSI RSS table gets programmed to be correct (whatever it
		 * was before the ADQ setup-tc configuration)
		 */
		if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
		    vsi->orig_rss_size <= vsi->num_rxq) {
			vsi->rss_size = vsi->orig_rss_size;
			/* now orig_rss_size is used, reset it to zero */
			vsi->orig_rss_size = 0;
		}
	}

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);

	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	if (err) {
		dev_err(dev, "set_rss_lut failed, error %d\n", err);
		goto ice_vsi_cfg_rss_exit;
	}

	key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto ice_vsi_cfg_rss_exit;
	}

	if (vsi->rss_hkey_user)
		memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
	else
		netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);

	err = ice_set_rss_key(vsi, key);
	if (err)
		dev_err(dev, "set_rss_key failed, error %d\n", err);

	kfree(key);
ice_vsi_cfg_rss_exit:
	kfree(lut);
	return err;
}
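/* For illustration: when no user LUT is set, ice_fill_rss_lut() fills the
 * table round-robin (entry i gets queue i % rss_size), e.g. a 128-entry
 * LUT with rss_size = 4 becomes 0,1,2,3,0,1,2,3,... so hashed flows spread
 * evenly across the VSI's Rx queues.
 */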
/**
 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called during the VF VSI setup. Upon successful
 * completion of package download, this function will configure default RSS
 * input sets for VF VSI.
 */
static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi->vsi_num);
		return;
	}

	status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
	if (status)
		dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
			vsi->vsi_num, status);
}
/**
 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called after a successful download package call
 * during initialization of the PF. Since the downloaded package will erase
 * the RSS section, this function will configure RSS input sets for different
 * flow types. The last profile added has the highest priority, therefore 2
 * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles
 * (i.e. IPv4 src/dst TCP src/dst port).
 */
static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
{
	u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int status;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi_num);
		return;
	}
	/* configure RSS for IPv4 with input set IP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
				 ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for IPv6 with input set IPv6 src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
				 ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
				 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
				 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for sctp4 with input set IP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
				 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
				 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
				 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for sctp6 with input set IPv6 src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
				 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
				 ICE_FLOW_SEG_HDR_ESP);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
			vsi_num, status);
}

/**
 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
 * @vsi: VSI
 */
static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
{
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
		vsi->rx_buf_len = ICE_RXBUF_1664;
#if (PAGE_SIZE < 8192)
	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
		vsi->rx_buf_len = ICE_RXBUF_3072;
	}
}
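/* Example of the sizing above on a 4K-page system: with a standard
 * 1500-byte MTU and legacy-rx off, the middle branch applies and both
 * max_frame and rx_buf_len become ICE_RXBUF_1536 - NET_IP_ALIGN; jumbo
 * MTUs fall through to ICE_RXBUF_3072 buffers, while legacy-rx forces
 * ICE_MAX_FRAME_LEGACY_RX with ICE_RXBUF_1664 buffers.
 */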
/**
 * ice_pf_state_is_nominal - checks the PF for nominal state
 * @pf: pointer to PF to check
 *
 * Check the PF's state for a collection of bits that would indicate
 * the PF is in a state that would inhibit normal operation for
 * driver functionality.
 *
 * Returns true if PF is in a nominal state, false otherwise
 */
bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };

	if (!pf)
		return false;

	bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
	if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
		return false;

	return true;
}

/**
 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 */
void ice_update_eth_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *prev_es, *cur_es;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_pf *pf = vsi->back;
	u16 vsi_num = vsi->vsi_num;	/* HW absolute index of a VSI */

	prev_es = &vsi->eth_stats_prev;
	cur_es = &vsi->eth_stats;

	if (ice_is_reset_in_progress(pf->state))
		vsi->stat_offsets_loaded = false;

	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_bytes, &cur_es->rx_bytes);

	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_unicast, &cur_es->rx_unicast);

	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_multicast, &cur_es->rx_multicast);

	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);

	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_discards, &cur_es->rx_discards);

	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_bytes, &cur_es->tx_bytes);

	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_unicast, &cur_es->tx_unicast);

	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_multicast, &cur_es->tx_multicast);

	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);

	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_errors, &cur_es->tx_errors);

	vsi->stat_offsets_loaded = true;
}

/**
 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
 * @hw: HW pointer
 * @pf_q: index of the Rx queue in the PF's queue space
 * @rxdid: flexible descriptor RXDID
 * @prio: priority for the RXDID for this queue
 * @ena_ts: true to enable timestamp and false to disable timestamp
 */
void
ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
			bool ena_ts)
{
	int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));

	/* clear any previous values */
	regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
		    QRXFLXP_CNTXT_RXDID_PRIO_M |
		    QRXFLXP_CNTXT_TS_M);

	regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
		QRXFLXP_CNTXT_RXDID_IDX_M;

	regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
		QRXFLXP_CNTXT_RXDID_PRIO_M;

	if (ena_ts)
		/* Enable TimeSync on this queue */
		regval |= QRXFLXP_CNTXT_TS_M;

	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}

int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
{
	if (q_idx >= vsi->num_rxq)
		return -EINVAL;

	return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
}
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	int err;

	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
		return -EINVAL;

	qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;

	err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
	kfree(qg_buf);
	return err;
}

/**
 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Rx VSI for operation.
 */
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
	u16 i;

	if (vsi->type == ICE_VSI_VF)
		goto setup_rings;

	ice_vsi_cfg_frame_size(vsi);
setup_rings:
	/* set up individual rings */
	ice_for_each_rxq(vsi, i) {
		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 * @rings: Tx ring array to be configured
 * @count: number of Tx ring array elements
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	u16 q_idx = 0;
	int err = 0;

	qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;

	for (q_idx = 0; q_idx < count; q_idx++) {
		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
		if (err)
			goto err_cfg_txqs;
	}

err_cfg_txqs:
	kfree(qg_buf);
	return err;
}

/**
 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
}

/**
 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx queues dedicated for XDP in given VSI for operation.
 */
int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
{
	int ret;
	int i;

	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
	if (ret)
		return ret;

	ice_for_each_rxq(vsi, i)
		ice_tx_xsk_pool(vsi, i);

	return 0;
}
1953 */ 1954 static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) 1955 { 1956 u32 val = intrl / gran; 1957 1958 if (val) 1959 return val | GLINT_RATE_INTRL_ENA_M; 1960 return 0; 1961 } 1962 1963 /** 1964 * ice_write_intrl - write throttle rate limit to interrupt specific register 1965 * @q_vector: pointer to interrupt specific structure 1966 * @intrl: throttle rate limit in microseconds to write 1967 */ 1968 void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl) 1969 { 1970 struct ice_hw *hw = &q_vector->vsi->back->hw; 1971 1972 wr32(hw, GLINT_RATE(q_vector->reg_idx), 1973 ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25)); 1974 } 1975 1976 static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc) 1977 { 1978 switch (rc->type) { 1979 case ICE_RX_CONTAINER: 1980 if (rc->rx_ring) 1981 return rc->rx_ring->q_vector; 1982 break; 1983 case ICE_TX_CONTAINER: 1984 if (rc->tx_ring) 1985 return rc->tx_ring->q_vector; 1986 break; 1987 default: 1988 break; 1989 } 1990 1991 return NULL; 1992 } 1993 1994 /** 1995 * __ice_write_itr - write throttle rate to register 1996 * @q_vector: pointer to interrupt data structure 1997 * @rc: pointer to ring container 1998 * @itr: throttle rate in microseconds to write 1999 */ 2000 static void __ice_write_itr(struct ice_q_vector *q_vector, 2001 struct ice_ring_container *rc, u16 itr) 2002 { 2003 struct ice_hw *hw = &q_vector->vsi->back->hw; 2004 2005 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), 2006 ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S); 2007 } 2008 2009 /** 2010 * ice_write_itr - write throttle rate to queue specific register 2011 * @rc: pointer to ring container 2012 * @itr: throttle rate in microseconds to write 2013 */ 2014 void ice_write_itr(struct ice_ring_container *rc, u16 itr) 2015 { 2016 struct ice_q_vector *q_vector; 2017 2018 q_vector = ice_pull_qvec_from_rc(rc); 2019 if (!q_vector) 2020 return; 2021 2022 __ice_write_itr(q_vector, rc, itr); 2023 } 2024 2025 /** 2026 * ice_set_q_vector_intrl - set up interrupt rate limiting 2027 * @q_vector: the vector to be configured 2028 * 2029 * Interrupt rate limiting is local to the vector, not per-queue so we must 2030 * detect if either ring container has dynamic moderation enabled to decide 2031 * what to set the interrupt rate limit to via INTRL settings. In the case that 2032 * dynamic moderation is disabled on both, write the value with the cached 2033 * setting to make sure INTRL register matches the user visible value. 2034 */ 2035 void ice_set_q_vector_intrl(struct ice_q_vector *q_vector) 2036 { 2037 if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) { 2038 /* in the case of dynamic enabled, cap each vector to no more 2039 * than (4 us) 250,000 ints/sec, which allows low latency 2040 * but still less than 500,000 interrupts per second, which 2041 * reduces CPU a bit in the case of the lowest latency 2042 * setting. The 4 here is a value in microseconds. 2043 */ 2044 ice_write_intrl(q_vector, 4); 2045 } else { 2046 ice_write_intrl(q_vector, q_vector->intrl); 2047 } 2048 } 2049 2050 /** 2051 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW 2052 * @vsi: the VSI being configured 2053 * 2054 * This configures MSIX mode interrupts for the PF VSI, and should not be used 2055 * for the VF VSI. 
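 *
 * Concretely: ice_cfg_itr() programs the vector's ITR registers, and the two
 * loops below tie each Tx/Rx queue's interrupt cause to the vector's reg_idx
 * via ice_cfg_txq_interrupt()/ice_cfg_rxq_interrupt(), so queue events raise
 * the correct MSI-X vector.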
2056 */ 2057 void ice_vsi_cfg_msix(struct ice_vsi *vsi) 2058 { 2059 struct ice_pf *pf = vsi->back; 2060 struct ice_hw *hw = &pf->hw; 2061 u16 txq = 0, rxq = 0; 2062 int i, q; 2063 2064 ice_for_each_q_vector(vsi, i) { 2065 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2066 u16 reg_idx = q_vector->reg_idx; 2067 2068 ice_cfg_itr(hw, q_vector); 2069 2070 /* Both Transmit Queue Interrupt Cause Control register 2071 * and Receive Queue Interrupt Cause control register 2072 * expects MSIX_INDX field to be the vector index 2073 * within the function space and not the absolute 2074 * vector index across PF or across device. 2075 * For SR-IOV VF VSIs queue vector index always starts 2076 * with 1 since first vector index(0) is used for OICR 2077 * in VF space. Since VMDq and other PF VSIs are within 2078 * the PF function space, use the vector index that is 2079 * tracked for this PF. 2080 */ 2081 for (q = 0; q < q_vector->num_ring_tx; q++) { 2082 ice_cfg_txq_interrupt(vsi, txq, reg_idx, 2083 q_vector->tx.itr_idx); 2084 txq++; 2085 } 2086 2087 for (q = 0; q < q_vector->num_ring_rx; q++) { 2088 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx, 2089 q_vector->rx.itr_idx); 2090 rxq++; 2091 } 2092 } 2093 } 2094 2095 /** 2096 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings 2097 * @vsi: the VSI whose rings are to be enabled 2098 * 2099 * Returns 0 on success and a negative value on error 2100 */ 2101 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi) 2102 { 2103 return ice_vsi_ctrl_all_rx_rings(vsi, true); 2104 } 2105 2106 /** 2107 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings 2108 * @vsi: the VSI whose rings are to be disabled 2109 * 2110 * Returns 0 on success and a negative value on error 2111 */ 2112 int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi) 2113 { 2114 return ice_vsi_ctrl_all_rx_rings(vsi, false); 2115 } 2116 2117 /** 2118 * ice_vsi_stop_tx_rings - Disable Tx rings 2119 * @vsi: the VSI being configured 2120 * @rst_src: reset source 2121 * @rel_vmvf_num: Relative ID of VF/VM 2122 * @rings: Tx ring array to be stopped 2123 * @count: number of Tx ring array elements 2124 */ 2125 static int 2126 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2127 u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count) 2128 { 2129 u16 q_idx; 2130 2131 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) 2132 return -EINVAL; 2133 2134 for (q_idx = 0; q_idx < count; q_idx++) { 2135 struct ice_txq_meta txq_meta = { }; 2136 int status; 2137 2138 if (!rings || !rings[q_idx]) 2139 return -EINVAL; 2140 2141 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); 2142 status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num, 2143 rings[q_idx], &txq_meta); 2144 2145 if (status) 2146 return status; 2147 } 2148 2149 return 0; 2150 } 2151 2152 /** 2153 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings 2154 * @vsi: the VSI being configured 2155 * @rst_src: reset source 2156 * @rel_vmvf_num: Relative ID of VF/VM 2157 */ 2158 int 2159 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2160 u16 rel_vmvf_num) 2161 { 2162 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq); 2163 } 2164 2165 /** 2166 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings 2167 * @vsi: the VSI being configured 2168 */ 2169 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi) 2170 { 2171 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq); 2172 } 2173 2174 /** 2175 * ice_vsi_is_rx_queue_active 2176 * @vsi: the VSI 
being configured 2177 * 2178 * Return true if at least one queue is active. 2179 */ 2180 bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi) 2181 { 2182 struct ice_pf *pf = vsi->back; 2183 struct ice_hw *hw = &pf->hw; 2184 int i; 2185 2186 ice_for_each_rxq(vsi, i) { 2187 u32 rx_reg; 2188 int pf_q; 2189 2190 pf_q = vsi->rxq_map[i]; 2191 rx_reg = rd32(hw, QRX_CTRL(pf_q)); 2192 if (rx_reg & QRX_CTRL_QENA_STAT_M) 2193 return true; 2194 } 2195 2196 return false; 2197 } 2198 2199 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) 2200 { 2201 if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { 2202 vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; 2203 vsi->tc_cfg.numtc = 1; 2204 return; 2205 } 2206 2207 /* set VSI TC information based on DCB config */ 2208 ice_vsi_set_dcb_tc_cfg(vsi); 2209 } 2210 2211 /** 2212 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling 2213 * @vsi: the VSI being configured 2214 * @tx: bool to determine Tx or Rx rule 2215 * @create: bool to determine create or remove rule 2216 */ 2217 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) 2218 { 2219 int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, 2220 enum ice_sw_fwd_act_type act); 2221 struct ice_pf *pf = vsi->back; 2222 struct device *dev; 2223 int status; 2224 2225 dev = ice_pf_to_dev(pf); 2226 eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth; 2227 2228 if (tx) { 2229 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX, 2230 ICE_DROP_PACKET); 2231 } else { 2232 if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) { 2233 status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num, 2234 create); 2235 } else { 2236 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, 2237 ICE_FWD_TO_VSI); 2238 } 2239 } 2240 2241 if (status) 2242 dev_dbg(dev, "Failed %s %s LLDP rule on VSI %i, error: %d\n", 2243 create ? "adding" : "removing", tx ? "TX" : "RX", 2244 vsi->vsi_num, status); 2245 } 2246 2247 /** 2248 * ice_set_agg_vsi - sets up scheduler aggregator node and moves VSI into it 2249 * @vsi: pointer to the VSI 2250 * 2251 * This function will allocate a new scheduler aggregator node if needed and 2252 * will move the specified VSI into it. 2253 */ 2254 static void ice_set_agg_vsi(struct ice_vsi *vsi) 2255 { 2256 struct device *dev = ice_pf_to_dev(vsi->back); 2257 struct ice_agg_node *agg_node_iter = NULL; 2258 u32 agg_id = ICE_INVALID_AGG_NODE_ID; 2259 struct ice_agg_node *agg_node = NULL; 2260 int node_offset, max_agg_nodes = 0; 2261 struct ice_port_info *port_info; 2262 struct ice_pf *pf = vsi->back; 2263 u32 agg_node_id_start = 0; 2264 int status; 2265 2266 /* create (as needed) scheduler aggregator node and move VSI into 2267 * corresponding aggregator node 2268 * - PF aggregator node to contain VSIs of type _PF and _CTRL 2269 * - VF aggregator nodes will contain VF VSI 2270 */ 2271 port_info = pf->hw.port_info; 2272 if (!port_info) 2273 return; 2274 2275 switch (vsi->type) { 2276 case ICE_VSI_CTRL: 2277 case ICE_VSI_CHNL: 2278 case ICE_VSI_LB: 2279 case ICE_VSI_PF: 2280 case ICE_VSI_SWITCHDEV_CTRL: 2281 max_agg_nodes = ICE_MAX_PF_AGG_NODES; 2282 agg_node_id_start = ICE_PF_AGG_NODE_ID_START; 2283 agg_node_iter = &pf->pf_agg_node[0]; 2284 break; 2285 case ICE_VSI_VF: 2286 /* user can create 'n' VFs on a given PF, but an aggregator node 2287 * can hold at most 64 children.
The following code handles 2288 * aggregator(s) for VF VSIs: it either selects an agg_node that 2289 * was already created (provided num_vsis < 64), or it picks the 2290 * next available node, which will then be created 2291 */ 2292 max_agg_nodes = ICE_MAX_VF_AGG_NODES; 2293 agg_node_id_start = ICE_VF_AGG_NODE_ID_START; 2294 agg_node_iter = &pf->vf_agg_node[0]; 2295 break; 2296 default: 2297 /* other VSI type, handle later if needed */ 2298 dev_dbg(dev, "unexpected VSI type %s\n", 2299 ice_vsi_type_str(vsi->type)); 2300 return; 2301 } 2302 2303 /* find the appropriate aggregator node */ 2304 for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) { 2305 /* see if we can find space in previously created 2306 * node if num_vsis < 64, otherwise skip 2307 */ 2308 if (agg_node_iter->num_vsis && 2309 agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { 2310 agg_node_iter++; 2311 continue; 2312 } 2313 2314 if (agg_node_iter->valid && 2315 agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) { 2316 agg_id = agg_node_iter->agg_id; 2317 agg_node = agg_node_iter; 2318 break; 2319 } 2320 2321 /* find unclaimed agg_id */ 2322 if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) { 2323 agg_id = node_offset + agg_node_id_start; 2324 agg_node = agg_node_iter; 2325 break; 2326 } 2327 /* move to next agg_node */ 2328 agg_node_iter++; 2329 } 2330 2331 if (!agg_node) 2332 return; 2333 2334 /* if selected aggregator node was not created, create it */ 2335 if (!agg_node->valid) { 2336 status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG, 2337 (u8)vsi->tc_cfg.ena_tc); 2338 if (status) { 2339 dev_err(dev, "unable to create aggregator node with agg_id %u\n", 2340 agg_id); 2341 return; 2342 } 2343 /* aggregator node is created, store the needed info */ 2344 agg_node->valid = true; 2345 agg_node->agg_id = agg_id; 2346 } 2347 2348 /* move VSI to corresponding aggregator node */ 2349 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx, 2350 (u8)vsi->tc_cfg.ena_tc); 2351 if (status) { 2352 dev_err(dev, "unable to move VSI idx %u into aggregator node %u\n", 2353 vsi->idx, agg_id); 2354 return; 2355 } 2356 2357 /* keep active children count for aggregator node */ 2358 agg_node->num_vsis++; 2359 2360 /* cache the 'agg_id' in VSI, so that after reset - VSI will be moved 2361 * to aggregator node 2362 */ 2363 vsi->agg_node = agg_node; 2364 dev_dbg(dev, "successfully moved VSI idx %u (tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n", 2365 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id, 2366 vsi->agg_node->num_vsis); 2367 } 2368 2369 static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi) 2370 { 2371 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2372 struct device *dev = ice_pf_to_dev(pf); 2373 int ret, i; 2374 2375 /* configure VSI nodes based on number of queues and TC's */ 2376 ice_for_each_traffic_class(i) { 2377 if (!(vsi->tc_cfg.ena_tc & BIT(i))) 2378 continue; 2379 2380 if (vsi->type == ICE_VSI_CHNL) { 2381 if (!vsi->alloc_txq && vsi->num_txq) 2382 max_txqs[i] = vsi->num_txq; 2383 else 2384 max_txqs[i] = pf->num_lan_tx; 2385 } else { 2386 max_txqs[i] = vsi->alloc_txq; 2387 } 2388 } 2389 2390 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); 2391 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2392 max_txqs); 2393 if (ret) { 2394 dev_err(dev, "VSI %d failed lan queue config, error %d\n", 2395 vsi->vsi_num, ret); 2396 return ret; 2397 } 2398 2399 return 0; 2400 } 2401 2402 /** 2403 * ice_vsi_cfg_def - configure default VSI based on the type 2404 * 
@vsi: pointer to VSI 2405 * @params: the parameters to configure this VSI with 2406 */ 2407 static int 2408 ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) 2409 { 2410 struct device *dev = ice_pf_to_dev(vsi->back); 2411 struct ice_pf *pf = vsi->back; 2412 int ret; 2413 2414 vsi->vsw = pf->first_sw; 2415 2416 ret = ice_vsi_alloc_def(vsi, params->ch); 2417 if (ret) 2418 return ret; 2419 2420 /* allocate memory for Tx/Rx ring stat pointers */ 2421 ret = ice_vsi_alloc_stat_arrays(vsi); 2422 if (ret) 2423 goto unroll_vsi_alloc; 2424 2425 ice_alloc_fd_res(vsi); 2426 2427 ret = ice_vsi_get_qs(vsi); 2428 if (ret) { 2429 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", 2430 vsi->idx); 2431 goto unroll_vsi_alloc_stat; 2432 } 2433 2434 /* set RSS capabilities */ 2435 ice_vsi_set_rss_params(vsi); 2436 2437 /* set TC configuration */ 2438 ice_vsi_set_tc_cfg(vsi); 2439 2440 /* create the VSI */ 2441 ret = ice_vsi_init(vsi, params->flags); 2442 if (ret) 2443 goto unroll_get_qs; 2444 2445 ice_vsi_init_vlan_ops(vsi); 2446 2447 switch (vsi->type) { 2448 case ICE_VSI_CTRL: 2449 case ICE_VSI_SWITCHDEV_CTRL: 2450 case ICE_VSI_PF: 2451 ret = ice_vsi_alloc_q_vectors(vsi); 2452 if (ret) 2453 goto unroll_vsi_init; 2454 2455 ret = ice_vsi_alloc_rings(vsi); 2456 if (ret) 2457 goto unroll_vector_base; 2458 2459 ret = ice_vsi_alloc_ring_stats(vsi); 2460 if (ret) 2461 goto unroll_vector_base; 2462 2463 ice_vsi_map_rings_to_vectors(vsi); 2464 vsi->stat_offsets_loaded = false; 2465 2466 if (ice_is_xdp_ena_vsi(vsi)) { 2467 ret = ice_vsi_determine_xdp_res(vsi); 2468 if (ret) 2469 goto unroll_vector_base; 2470 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); 2471 if (ret) 2472 goto unroll_vector_base; 2473 } 2474 2475 /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ 2476 if (vsi->type != ICE_VSI_CTRL) 2477 /* Do not exit if configuring RSS had an issue, at 2478 * least receive traffic on first queue. Hence no 2479 * need to capture return value 2480 */ 2481 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2482 ice_vsi_cfg_rss_lut_key(vsi); 2483 ice_vsi_set_rss_flow_fld(vsi); 2484 } 2485 ice_init_arfs(vsi); 2486 break; 2487 case ICE_VSI_CHNL: 2488 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2489 ice_vsi_cfg_rss_lut_key(vsi); 2490 ice_vsi_set_rss_flow_fld(vsi); 2491 } 2492 break; 2493 case ICE_VSI_VF: 2494 /* VF driver will take care of creating netdev for this type and 2495 * map queues to vectors through Virtchnl, PF driver only 2496 * creates a VSI and corresponding structures for bookkeeping 2497 * purpose 2498 */ 2499 ret = ice_vsi_alloc_q_vectors(vsi); 2500 if (ret) 2501 goto unroll_vsi_init; 2502 2503 ret = ice_vsi_alloc_rings(vsi); 2504 if (ret) 2505 goto unroll_alloc_q_vector; 2506 2507 ret = ice_vsi_alloc_ring_stats(vsi); 2508 if (ret) 2509 goto unroll_vector_base; 2510 2511 vsi->stat_offsets_loaded = false; 2512 2513 /* Do not exit if configuring RSS had an issue, at least 2514 * receive traffic on first queue. 
Hence no need to capture 2515 * return value 2516 */ 2517 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2518 ice_vsi_cfg_rss_lut_key(vsi); 2519 ice_vsi_set_vf_rss_flow_fld(vsi); 2520 } 2521 break; 2522 case ICE_VSI_LB: 2523 ret = ice_vsi_alloc_rings(vsi); 2524 if (ret) 2525 goto unroll_vsi_init; 2526 2527 ret = ice_vsi_alloc_ring_stats(vsi); 2528 if (ret) 2529 goto unroll_vector_base; 2530 2531 break; 2532 default: 2533 /* clean up the resources and exit */ 2534 ret = -EINVAL; 2535 goto unroll_vsi_init; 2536 } 2537 2538 return 0; 2539 2540 unroll_vector_base: 2541 /* reclaim SW interrupts back to the common pool */ 2542 unroll_alloc_q_vector: 2543 ice_vsi_free_q_vectors(vsi); 2544 unroll_vsi_init: 2545 ice_vsi_delete_from_hw(vsi); 2546 unroll_get_qs: 2547 ice_vsi_put_qs(vsi); 2548 unroll_vsi_alloc_stat: 2549 ice_vsi_free_stats(vsi); 2550 unroll_vsi_alloc: 2551 ice_vsi_free_arrays(vsi); 2552 return ret; 2553 } 2554 2555 /** 2556 * ice_vsi_cfg - configure a previously allocated VSI 2557 * @vsi: pointer to VSI 2558 * @params: parameters used to configure this VSI 2559 */ 2560 int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) 2561 { 2562 struct ice_pf *pf = vsi->back; 2563 int ret; 2564 2565 if (WARN_ON(params->type == ICE_VSI_VF && !params->vf)) 2566 return -EINVAL; 2567 2568 vsi->type = params->type; 2569 vsi->port_info = params->pi; 2570 2571 /* For VSIs which don't have a connected VF, this will be NULL */ 2572 vsi->vf = params->vf; 2573 2574 ret = ice_vsi_cfg_def(vsi, params); 2575 if (ret) 2576 return ret; 2577 2578 ret = ice_vsi_cfg_tc_lan(vsi->back, vsi); 2579 if (ret) 2580 ice_vsi_decfg(vsi); 2581 2582 if (vsi->type == ICE_VSI_CTRL) { 2583 if (vsi->vf) { 2584 WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI); 2585 vsi->vf->ctrl_vsi_idx = vsi->idx; 2586 } else { 2587 WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI); 2588 pf->ctrl_vsi_idx = vsi->idx; 2589 } 2590 } 2591 2592 return ret; 2593 } 2594 2595 /** 2596 * ice_vsi_decfg - remove all VSI configuration 2597 * @vsi: pointer to VSI 2598 */ 2599 void ice_vsi_decfg(struct ice_vsi *vsi) 2600 { 2601 struct ice_pf *pf = vsi->back; 2602 int err; 2603 2604 /* The Rx rule will only exist to remove if the LLDP FW 2605 * engine is currently stopped 2606 */ 2607 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && 2608 !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) 2609 ice_cfg_sw_lldp(vsi, false, false); 2610 2611 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); 2612 err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); 2613 if (err) 2614 dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", 2615 vsi->vsi_num, err); 2616 2617 if (ice_is_xdp_ena_vsi(vsi)) 2618 /* return value check can be skipped here, it always returns 2619 * 0 if reset is in progress 2620 */ 2621 ice_destroy_xdp_rings(vsi); 2622 2623 ice_vsi_clear_rings(vsi); 2624 ice_vsi_free_q_vectors(vsi); 2625 ice_vsi_put_qs(vsi); 2626 ice_vsi_free_arrays(vsi); 2627 2628 /* SR-IOV determines needed MSIX resources all at once instead of per 2629 * VSI since when VFs are spawned we know how many VFs there are and how 2630 * many interrupts each VF needs. SR-IOV MSIX resources are also 2631 * cleared in the same manner. 
2632 */ 2633 2634 if (vsi->type == ICE_VSI_VF && 2635 vsi->agg_node && vsi->agg_node->valid) 2636 vsi->agg_node->num_vsis--; 2637 if (vsi->agg_node) { 2638 vsi->agg_node->valid = false; 2639 vsi->agg_node->agg_id = 0; 2640 } 2641 } 2642 2643 /** 2644 * ice_vsi_setup - Set up a VSI by a given type 2645 * @pf: board private structure 2646 * @params: parameters to use when creating the VSI 2647 * 2648 * This allocates the sw VSI structure and its queue resources. 2649 * 2650 * Returns pointer to the successfully allocated and configured VSI sw struct on 2651 * success, NULL on failure. 2652 */ 2653 struct ice_vsi * 2654 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) 2655 { 2656 struct device *dev = ice_pf_to_dev(pf); 2657 struct ice_vsi *vsi; 2658 int ret; 2659 2660 /* ice_vsi_setup can only initialize a new VSI, and we must have 2661 * a port_info structure for it. 2662 */ 2663 if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) || 2664 WARN_ON(!params->pi)) 2665 return NULL; 2666 2667 vsi = ice_vsi_alloc(pf); 2668 if (!vsi) { 2669 dev_err(dev, "could not allocate VSI\n"); 2670 return NULL; 2671 } 2672 2673 ret = ice_vsi_cfg(vsi, params); 2674 if (ret) 2675 goto err_vsi_cfg; 2676 2677 /* Add switch rule to drop all Tx Flow Control Frames, of look up 2678 * type ETHERTYPE from VSIs, and restrict malicious VF from sending 2679 * out PAUSE or PFC frames. If enabled, FW can still send FC frames. 2680 * The rule is added once for PF VSI in order to create appropriate 2681 * recipe, since VSI/VSI list is ignored with drop action... 2682 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to 2683 * be dropped so that VFs cannot send LLDP packets to reconfig DCB 2684 * settings in the HW. 2685 */ 2686 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { 2687 ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, 2688 ICE_DROP_PACKET); 2689 ice_cfg_sw_lldp(vsi, true, true); 2690 } 2691 2692 if (!vsi->agg_node) 2693 ice_set_agg_vsi(vsi); 2694 2695 return vsi; 2696 2697 err_vsi_cfg: 2698 ice_vsi_free(vsi); 2699 2700 return NULL; 2701 } 2702 2703 /** 2704 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW 2705 * @vsi: the VSI being cleaned up 2706 */ 2707 static void ice_vsi_release_msix(struct ice_vsi *vsi) 2708 { 2709 struct ice_pf *pf = vsi->back; 2710 struct ice_hw *hw = &pf->hw; 2711 u32 txq = 0; 2712 u32 rxq = 0; 2713 int i, q; 2714 2715 ice_for_each_q_vector(vsi, i) { 2716 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2717 2718 ice_write_intrl(q_vector, 0); 2719 for (q = 0; q < q_vector->num_ring_tx; q++) { 2720 ice_write_itr(&q_vector->tx, 0); 2721 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); 2722 if (ice_is_xdp_ena_vsi(vsi)) { 2723 u32 xdp_txq = txq + vsi->num_xdp_txq; 2724 2725 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); 2726 } 2727 txq++; 2728 } 2729 2730 for (q = 0; q < q_vector->num_ring_rx; q++) { 2731 ice_write_itr(&q_vector->rx, 0); 2732 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); 2733 rxq++; 2734 } 2735 } 2736 2737 ice_flush(hw); 2738 } 2739 2740 /** 2741 * ice_vsi_free_irq - Free the IRQ association with the OS 2742 * @vsi: the VSI being configured 2743 */ 2744 void ice_vsi_free_irq(struct ice_vsi *vsi) 2745 { 2746 struct ice_pf *pf = vsi->back; 2747 int i; 2748 2749 if (!vsi->q_vectors || !vsi->irqs_ready) 2750 return; 2751 2752 ice_vsi_release_msix(vsi); 2753 if (vsi->type == ICE_VSI_VF) 2754 return; 2755 2756 vsi->irqs_ready = false; 2757 ice_free_cpu_rx_rmap(vsi); 2758 2759 ice_for_each_q_vector(vsi, i) { 2760 int irq_num; 2761 
2762 /* free only the irqs that were actually requested */ 2763 if (!vsi->q_vectors[i] || 2764 !(vsi->q_vectors[i]->num_ring_tx || 2765 vsi->q_vectors[i]->num_ring_rx)) 2766 continue; 2767 2768 irq_num = vsi->q_vectors[i]->irq.virq; 2769 2770 /* clear the affinity notifier in the IRQ descriptor */ 2771 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) 2772 irq_set_affinity_notifier(irq_num, NULL); 2773 2774 /* clear the affinity_mask in the IRQ descriptor */ 2775 irq_set_affinity_hint(irq_num, NULL); 2776 synchronize_irq(irq_num); 2777 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); 2778 } 2779 } 2780 2781 /** 2782 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues 2783 * @vsi: the VSI having resources freed 2784 */ 2785 void ice_vsi_free_tx_rings(struct ice_vsi *vsi) 2786 { 2787 int i; 2788 2789 if (!vsi->tx_rings) 2790 return; 2791 2792 ice_for_each_txq(vsi, i) 2793 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 2794 ice_free_tx_ring(vsi->tx_rings[i]); 2795 } 2796 2797 /** 2798 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues 2799 * @vsi: the VSI having resources freed 2800 */ 2801 void ice_vsi_free_rx_rings(struct ice_vsi *vsi) 2802 { 2803 int i; 2804 2805 if (!vsi->rx_rings) 2806 return; 2807 2808 ice_for_each_rxq(vsi, i) 2809 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) 2810 ice_free_rx_ring(vsi->rx_rings[i]); 2811 } 2812 2813 /** 2814 * ice_vsi_close - Shut down a VSI 2815 * @vsi: the VSI being shut down 2816 */ 2817 void ice_vsi_close(struct ice_vsi *vsi) 2818 { 2819 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) 2820 ice_down(vsi); 2821 2822 ice_vsi_free_irq(vsi); 2823 ice_vsi_free_tx_rings(vsi); 2824 ice_vsi_free_rx_rings(vsi); 2825 } 2826 2827 /** 2828 * ice_ena_vsi - resume a VSI 2829 * @vsi: the VSI being resumed 2830 * @locked: is the rtnl_lock already held 2831 */ 2832 int ice_ena_vsi(struct ice_vsi *vsi, bool locked) 2833 { 2834 int err = 0; 2835 2836 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state)) 2837 return 0; 2838 2839 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 2840 2841 if (vsi->netdev && vsi->type == ICE_VSI_PF) { 2842 if (netif_running(vsi->netdev)) { 2843 if (!locked) 2844 rtnl_lock(); 2845 2846 err = ice_open_internal(vsi->netdev); 2847 2848 if (!locked) 2849 rtnl_unlock(); 2850 } 2851 } else if (vsi->type == ICE_VSI_CTRL) { 2852 err = ice_vsi_open_ctrl(vsi); 2853 } 2854 2855 return err; 2856 } 2857 2858 /** 2859 * ice_dis_vsi - pause a VSI 2860 * @vsi: the VSI being paused 2861 * @locked: is the rtnl_lock already held 2862 */ 2863 void ice_dis_vsi(struct ice_vsi *vsi, bool locked) 2864 { 2865 if (test_bit(ICE_VSI_DOWN, vsi->state)) 2866 return; 2867 2868 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 2869 2870 if (vsi->type == ICE_VSI_PF && vsi->netdev) { 2871 if (netif_running(vsi->netdev)) { 2872 if (!locked) 2873 rtnl_lock(); 2874 2875 ice_vsi_close(vsi); 2876 2877 if (!locked) 2878 rtnl_unlock(); 2879 } else { 2880 ice_vsi_close(vsi); 2881 } 2882 } else if (vsi->type == ICE_VSI_CTRL || 2883 vsi->type == ICE_VSI_SWITCHDEV_CTRL) { 2884 ice_vsi_close(vsi); 2885 } 2886 } 2887 2888 /** 2889 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI 2890 * @vsi: the VSI being un-configured 2891 */ 2892 void ice_vsi_dis_irq(struct ice_vsi *vsi) 2893 { 2894 struct ice_pf *pf = vsi->back; 2895 struct ice_hw *hw = &pf->hw; 2896 u32 val; 2897 int i; 2898 2899 /* disable interrupt causation from each queue */ 2900 if (vsi->tx_rings) { 2901 ice_for_each_txq(vsi, i) { 2902 if (vsi->tx_rings[i]) { 2903 u16 reg; 2904 2905 reg =
vsi->tx_rings[i]->reg_idx; 2906 val = rd32(hw, QINT_TQCTL(reg)); 2907 val &= ~QINT_TQCTL_CAUSE_ENA_M; 2908 wr32(hw, QINT_TQCTL(reg), val); 2909 } 2910 } 2911 } 2912 2913 if (vsi->rx_rings) { 2914 ice_for_each_rxq(vsi, i) { 2915 if (vsi->rx_rings[i]) { 2916 u16 reg; 2917 2918 reg = vsi->rx_rings[i]->reg_idx; 2919 val = rd32(hw, QINT_RQCTL(reg)); 2920 val &= ~QINT_RQCTL_CAUSE_ENA_M; 2921 wr32(hw, QINT_RQCTL(reg), val); 2922 } 2923 } 2924 } 2925 2926 /* disable each interrupt */ 2927 ice_for_each_q_vector(vsi, i) { 2928 if (!vsi->q_vectors[i]) 2929 continue; 2930 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); 2931 } 2932 2933 ice_flush(hw); 2934 2935 /* don't call synchronize_irq() for VF's from the host */ 2936 if (vsi->type == ICE_VSI_VF) 2937 return; 2938 2939 ice_for_each_q_vector(vsi, i) 2940 synchronize_irq(vsi->q_vectors[i]->irq.virq); 2941 } 2942 2943 /** 2944 * ice_vsi_release - Delete a VSI and free its resources 2945 * @vsi: the VSI being removed 2946 * 2947 * Returns 0 on success or < 0 on error 2948 */ 2949 int ice_vsi_release(struct ice_vsi *vsi) 2950 { 2951 struct ice_pf *pf; 2952 2953 if (!vsi->back) 2954 return -ENODEV; 2955 pf = vsi->back; 2956 2957 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2958 ice_rss_clean(vsi); 2959 2960 ice_vsi_close(vsi); 2961 ice_vsi_decfg(vsi); 2962 2963 /* retain SW VSI data structure since it is needed to unregister and 2964 * free VSI netdev when PF is not in reset recovery pending state,\ 2965 * for ex: during rmmod. 2966 */ 2967 if (!ice_is_reset_in_progress(pf->state)) 2968 ice_vsi_delete(vsi); 2969 2970 return 0; 2971 } 2972 2973 /** 2974 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors 2975 * @vsi: VSI connected with q_vectors 2976 * @coalesce: array of struct with stored coalesce 2977 * 2978 * Returns array size. 2979 */ 2980 static int 2981 ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi, 2982 struct ice_coalesce_stored *coalesce) 2983 { 2984 int i; 2985 2986 ice_for_each_q_vector(vsi, i) { 2987 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2988 2989 coalesce[i].itr_tx = q_vector->tx.itr_settings; 2990 coalesce[i].itr_rx = q_vector->rx.itr_settings; 2991 coalesce[i].intrl = q_vector->intrl; 2992 2993 if (i < vsi->num_txq) 2994 coalesce[i].tx_valid = true; 2995 if (i < vsi->num_rxq) 2996 coalesce[i].rx_valid = true; 2997 } 2998 2999 return vsi->num_q_vectors; 3000 } 3001 3002 /** 3003 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays 3004 * @vsi: VSI connected with q_vectors 3005 * @coalesce: pointer to array of struct with stored coalesce 3006 * @size: size of coalesce array 3007 * 3008 * Before this function, ice_vsi_rebuild_get_coalesce should be called to save 3009 * ITR params in arrays. If size is 0 or coalesce wasn't stored set coalesce 3010 * to default value. 3011 */ 3012 static void 3013 ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, 3014 struct ice_coalesce_stored *coalesce, int size) 3015 { 3016 struct ice_ring_container *rc; 3017 int i; 3018 3019 if ((size && !coalesce) || !vsi) 3020 return; 3021 3022 /* There are a couple of cases that have to be handled here: 3023 * 1. The case where the number of queue vectors stays the same, but 3024 * the number of Tx or Rx rings changes (the first for loop) 3025 * 2. 
The case where the number of queue vectors increased (the 3026 * second for loop) 3027 */ 3028 for (i = 0; i < size && i < vsi->num_q_vectors; i++) { 3029 /* There are 2 cases to handle here and they are the same for 3030 * both Tx and Rx: 3031 * if the entry was valid previously (coalesce[i].[tr]x_valid) 3032 * and the loop variable is less than the number of rings 3033 * allocated, then write the previous values 3034 * 3035 * if the entry was not valid previously, but the index is 3036 * still within the number of rings allocated (meaning the 3037 * ring count increased), then write out the 3038 * values in the first element 3039 * 3040 * Also, always write the ITR, even if ITR_IS_DYNAMIC is set, 3041 * as there is no harm because the dynamic algorithm 3042 * will just overwrite it. 3043 */ 3044 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) { 3045 rc = &vsi->q_vectors[i]->rx; 3046 rc->itr_settings = coalesce[i].itr_rx; 3047 ice_write_itr(rc, rc->itr_setting); 3048 } else if (i < vsi->alloc_rxq) { 3049 rc = &vsi->q_vectors[i]->rx; 3050 rc->itr_settings = coalesce[0].itr_rx; 3051 ice_write_itr(rc, rc->itr_setting); 3052 } 3053 3054 if (i < vsi->alloc_txq && coalesce[i].tx_valid) { 3055 rc = &vsi->q_vectors[i]->tx; 3056 rc->itr_settings = coalesce[i].itr_tx; 3057 ice_write_itr(rc, rc->itr_setting); 3058 } else if (i < vsi->alloc_txq) { 3059 rc = &vsi->q_vectors[i]->tx; 3060 rc->itr_settings = coalesce[0].itr_tx; 3061 ice_write_itr(rc, rc->itr_setting); 3062 } 3063 3064 vsi->q_vectors[i]->intrl = coalesce[i].intrl; 3065 ice_set_q_vector_intrl(vsi->q_vectors[i]); 3066 } 3067 3068 /* the number of queue vectors increased so write whatever is in 3069 * the first element 3070 */ 3071 for (; i < vsi->num_q_vectors; i++) { 3072 /* transmit */ 3073 rc = &vsi->q_vectors[i]->tx; 3074 rc->itr_settings = coalesce[0].itr_tx; 3075 ice_write_itr(rc, rc->itr_setting); 3076 3077 /* receive */ 3078 rc = &vsi->q_vectors[i]->rx; 3079 rc->itr_settings = coalesce[0].itr_rx; 3080 ice_write_itr(rc, rc->itr_setting); 3081 3082 vsi->q_vectors[i]->intrl = coalesce[0].intrl; 3083 ice_set_q_vector_intrl(vsi->q_vectors[i]); 3084 } 3085 } 3086 3087 /** 3088 * ice_vsi_realloc_stat_arrays - Frees unused stat structures 3089 * @vsi: VSI pointer 3090 * @prev_txq: Number of Tx rings before ring reallocation 3091 * @prev_rxq: Number of Rx rings before ring reallocation 3092 */ 3093 static void 3094 ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq) 3095 { 3096 struct ice_vsi_stats *vsi_stat; 3097 struct ice_pf *pf = vsi->back; 3098 int i; 3099 3100 if (!prev_txq || !prev_rxq) 3101 return; 3102 if (vsi->type == ICE_VSI_CHNL) 3103 return; 3104 3105 vsi_stat = pf->vsi_stats[vsi->idx]; 3106 3107 if (vsi->num_txq < prev_txq) { 3108 for (i = vsi->num_txq; i < prev_txq; i++) { 3109 if (vsi_stat->tx_ring_stats[i]) { 3110 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); 3111 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); 3112 } 3113 } 3114 } 3115 3116 if (vsi->num_rxq < prev_rxq) { 3117 for (i = vsi->num_rxq; i < prev_rxq; i++) { 3118 if (vsi_stat->rx_ring_stats[i]) { 3119 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); 3120 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); 3121 } 3122 } 3123 } 3124 } 3125 3126 /** 3127 * ice_vsi_rebuild - Rebuild VSI after reset 3128 * @vsi: VSI to be rebuilt 3129 * @vsi_flags: flags used for VSI rebuild flow 3130 * 3131 * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or 3132 * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.
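 * Coalesce (ITR/INTRL) settings for each q_vector are saved with
 * ice_vsi_rebuild_get_coalesce() before teardown and restored with
 * ice_vsi_rebuild_set_coalesce() afterwards, so a rebuild preserves any
 * user-configured interrupt moderation.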
3133 * 3134 * Returns 0 on success and a negative value on failure 3135 */ 3136 int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) 3137 { 3138 struct ice_vsi_cfg_params params = {}; 3139 struct ice_coalesce_stored *coalesce; 3140 int ret, prev_txq, prev_rxq; 3141 int prev_num_q_vectors = 0; 3142 struct ice_pf *pf; 3143 3144 if (!vsi) 3145 return -EINVAL; 3146 3147 params = ice_vsi_to_params(vsi); 3148 params.flags = vsi_flags; 3149 3150 pf = vsi->back; 3151 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) 3152 return -EINVAL; 3153 3154 coalesce = kcalloc(vsi->num_q_vectors, 3155 sizeof(struct ice_coalesce_stored), GFP_KERNEL); 3156 if (!coalesce) 3157 return -ENOMEM; 3158 3159 prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); 3160 3161 prev_txq = vsi->num_txq; 3162 prev_rxq = vsi->num_rxq; 3163 3164 ice_vsi_decfg(vsi); 3165 ret = ice_vsi_cfg_def(vsi, &params); 3166 if (ret) 3167 goto err_vsi_cfg; 3168 3169 ret = ice_vsi_cfg_tc_lan(pf, vsi); 3170 if (ret) { 3171 if (vsi_flags & ICE_VSI_FLAG_INIT) { 3172 ret = -EIO; 3173 goto err_vsi_cfg_tc_lan; 3174 } 3175 3176 kfree(coalesce); 3177 return ice_schedule_reset(pf, ICE_RESET_PFR); 3178 } 3179 3180 ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq); 3181 3182 ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors); 3183 kfree(coalesce); 3184 3185 return 0; 3186 3187 err_vsi_cfg_tc_lan: 3188 ice_vsi_decfg(vsi); 3189 err_vsi_cfg: 3190 kfree(coalesce); 3191 return ret; 3192 } 3193 3194 /** 3195 * ice_is_reset_in_progress - check for a reset in progress 3196 * @state: PF state field 3197 */ 3198 bool ice_is_reset_in_progress(unsigned long *state) 3199 { 3200 return test_bit(ICE_RESET_OICR_RECV, state) || 3201 test_bit(ICE_PFR_REQ, state) || 3202 test_bit(ICE_CORER_REQ, state) || 3203 test_bit(ICE_GLOBR_REQ, state); 3204 } 3205 3206 /** 3207 * ice_wait_for_reset - Wait for driver to finish reset and rebuild 3208 * @pf: pointer to the PF structure 3209 * @timeout: length of time to wait, in jiffies 3210 * 3211 * Wait (sleep) for a short time until the driver finishes cleaning up from 3212 * a device reset. The caller must be able to sleep. Use this to delay 3213 * operations that could fail while the driver is cleaning up after a device 3214 * reset. 3215 * 3216 * Returns 0 on success, -EBUSY if the reset is not finished within the 3217 * timeout, and -ERESTARTSYS if the thread was interrupted.
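 *
 * Illustrative usage (hypothetical caller):
 *
 *	err = ice_wait_for_reset(pf, msecs_to_jiffies(10000));
 *	if (err)
 *		dev_err(ice_pf_to_dev(pf), "reset did not finish: %d\n", err);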
3218 */ 3219 int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout) 3220 { 3221 long ret; 3222 3223 ret = wait_event_interruptible_timeout(pf->reset_wait_queue, 3224 !ice_is_reset_in_progress(pf->state), 3225 timeout); 3226 if (ret < 0) 3227 return ret; 3228 else if (!ret) 3229 return -EBUSY; 3230 else 3231 return 0; 3232 } 3233 3234 /** 3235 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map 3236 * @vsi: VSI being configured 3237 * @ctx: the context buffer returned from AQ VSI update command 3238 */ 3239 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) 3240 { 3241 vsi->info.mapping_flags = ctx->info.mapping_flags; 3242 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, 3243 sizeof(vsi->info.q_mapping)); 3244 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, 3245 sizeof(vsi->info.tc_mapping)); 3246 } 3247 3248 /** 3249 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration 3250 * @vsi: the VSI being configured 3251 * @ena_tc: TC map to be enabled 3252 */ 3253 void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) 3254 { 3255 struct net_device *netdev = vsi->netdev; 3256 struct ice_pf *pf = vsi->back; 3257 int numtc = vsi->tc_cfg.numtc; 3258 struct ice_dcbx_cfg *dcbcfg; 3259 u8 netdev_tc; 3260 int i; 3261 3262 if (!netdev) 3263 return; 3264 3265 /* CHNL VSI doesn't have its own netdev, hence, no netdev_tc */ 3266 if (vsi->type == ICE_VSI_CHNL) 3267 return; 3268 3269 if (!ena_tc) { 3270 netdev_reset_tc(netdev); 3271 return; 3272 } 3273 3274 if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf)) 3275 numtc = vsi->all_numtc; 3276 3277 if (netdev_set_num_tc(netdev, numtc)) 3278 return; 3279 3280 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; 3281 3282 ice_for_each_traffic_class(i) 3283 if (vsi->tc_cfg.ena_tc & BIT(i)) 3284 netdev_set_tc_queue(netdev, 3285 vsi->tc_cfg.tc_info[i].netdev_tc, 3286 vsi->tc_cfg.tc_info[i].qcount_tx, 3287 vsi->tc_cfg.tc_info[i].qoffset); 3288 /* setup TC queue map for CHNL TCs */ 3289 ice_for_each_chnl_tc(i) { 3290 if (!(vsi->all_enatc & BIT(i))) 3291 break; 3292 if (!vsi->mqprio_qopt.qopt.count[i]) 3293 break; 3294 netdev_set_tc_queue(netdev, i, 3295 vsi->mqprio_qopt.qopt.count[i], 3296 vsi->mqprio_qopt.qopt.offset[i]); 3297 } 3298 3299 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3300 return; 3301 3302 for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { 3303 u8 ets_tc = dcbcfg->etscfg.prio_table[i]; 3304 3305 /* Get the mapped netdev TC# for the UP */ 3306 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; 3307 netdev_set_prio_tc_map(netdev, i, netdev_tc); 3308 } 3309 } 3310 3311 /** 3312 * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config 3313 * @vsi: the VSI being configured 3314 * @ctxt: VSI context structure 3315 * @ena_tc: TC bitmap to be enabled 3316 * 3317 * Prepares VSI tc_config to have queue configurations based on MQPRIO options. 3318 */ 3319 static int 3320 ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, 3321 u8 ena_tc) 3322 { 3323 u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap; 3324 u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0]; 3325 int tc0_qcount = vsi->mqprio_qopt.qopt.count[0]; 3326 u16 new_txq, new_rxq; 3327 u8 netdev_tc = 0; 3328 int i; 3329 3330 vsi->tc_cfg.ena_tc = ena_tc ?
ena_tc : 1; 3331 3332 pow = order_base_2(tc0_qcount); 3333 qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & 3334 ICE_AQ_VSI_TC_Q_OFFSET_M) | 3335 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M); 3336 3337 ice_for_each_traffic_class(i) { 3338 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { 3339 /* TC is not enabled */ 3340 vsi->tc_cfg.tc_info[i].qoffset = 0; 3341 vsi->tc_cfg.tc_info[i].qcount_rx = 1; 3342 vsi->tc_cfg.tc_info[i].qcount_tx = 1; 3343 vsi->tc_cfg.tc_info[i].netdev_tc = 0; 3344 ctxt->info.tc_mapping[i] = 0; 3345 continue; 3346 } 3347 3348 offset = vsi->mqprio_qopt.qopt.offset[i]; 3349 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; 3350 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; 3351 vsi->tc_cfg.tc_info[i].qoffset = offset; 3352 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; 3353 vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx; 3354 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; 3355 } 3356 3357 if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) { 3358 ice_for_each_chnl_tc(i) { 3359 if (!(vsi->all_enatc & BIT(i))) 3360 continue; 3361 offset = vsi->mqprio_qopt.qopt.offset[i]; 3362 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; 3363 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; 3364 } 3365 } 3366 3367 new_txq = offset + qcount_tx; 3368 if (new_txq > vsi->alloc_txq) { 3369 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", 3370 new_txq, vsi->alloc_txq); 3371 return -EINVAL; 3372 } 3373 3374 new_rxq = offset + qcount_rx; 3375 if (new_rxq > vsi->alloc_rxq) { 3376 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", 3377 new_rxq, vsi->alloc_rxq); 3378 return -EINVAL; 3379 } 3380 3381 /* Set actual Tx/Rx queue pairs */ 3382 vsi->num_txq = new_txq; 3383 vsi->num_rxq = new_rxq; 3384 3385 /* Setup queue TC[0].qmap for given VSI context */ 3386 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); 3387 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); 3388 ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount); 3389 3390 /* Find queue count available for channel VSIs and starting offset 3391 * for channel VSIs 3392 */ 3393 if (tc0_qcount && tc0_qcount < vsi->num_rxq) { 3394 vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount; 3395 vsi->next_base_q = tc0_qcount; 3396 } 3397 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq); 3398 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq); 3399 dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n", 3400 vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc); 3401 3402 return 0; 3403 } 3404 3405 /** 3406 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map 3407 * @vsi: VSI to be configured 3408 * @ena_tc: TC bitmap 3409 * 3410 * VSI queues expected to be quiesced before calling this function 3411 */ 3412 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) 3413 { 3414 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 3415 struct ice_pf *pf = vsi->back; 3416 struct ice_tc_cfg old_tc_cfg; 3417 struct ice_vsi_ctx *ctx; 3418 struct device *dev; 3419 int i, ret = 0; 3420 u8 num_tc = 0; 3421 3422 dev = ice_pf_to_dev(pf); 3423 if (vsi->tc_cfg.ena_tc == ena_tc && 3424 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) 3425 return 0; 3426 3427 ice_for_each_traffic_class(i) { 3428 /* build bitmap of enabled TCs */ 3429 if (ena_tc & BIT(i)) 3430 num_tc++; 3431 /* populate max_txqs per TC */ 3432 max_txqs[i] = vsi->alloc_txq; 3433 /* Update max_txqs if it is CHNL VSI, because alloc_t[r]xq are 3434 * zero for CHNL VSI, 
hence use num_txq instead as max_txqs 3435 */ 3436 if (vsi->type == ICE_VSI_CHNL && 3437 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3438 max_txqs[i] = vsi->num_txq; 3439 } 3440 3441 memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg)); 3442 vsi->tc_cfg.ena_tc = ena_tc; 3443 vsi->tc_cfg.numtc = num_tc; 3444 3445 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 3446 if (!ctx) 3447 return -ENOMEM; 3448 3449 ctx->vf_num = 0; 3450 ctx->info = vsi->info; 3451 3452 if (vsi->type == ICE_VSI_PF && 3453 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3454 ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); 3455 else 3456 ret = ice_vsi_setup_q_map(vsi, ctx); 3457 3458 if (ret) { 3459 memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg)); 3460 goto out; 3461 } 3462 3463 /* must indicate which sections of the VSI context are being modified */ 3464 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); 3465 ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); 3466 if (ret) { 3467 dev_info(dev, "Failed VSI Update\n"); 3468 goto out; 3469 } 3470 3471 if (vsi->type == ICE_VSI_PF && 3472 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3473 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); 3474 else 3475 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 3476 vsi->tc_cfg.ena_tc, max_txqs); 3477 3478 if (ret) { 3479 dev_err(dev, "VSI %d failed TC config, error %d\n", 3480 vsi->vsi_num, ret); 3481 goto out; 3482 } 3483 ice_vsi_update_q_map(vsi, ctx); 3484 vsi->info.valid_sections = 0; 3485 3486 ice_vsi_cfg_netdev_tc(vsi, ena_tc); 3487 out: 3488 kfree(ctx); 3489 return ret; 3490 } 3491 3492 /** 3493 * ice_update_ring_stats - Update ring statistics 3494 * @stats: stats to be updated 3495 * @pkts: number of processed packets 3496 * @bytes: number of processed bytes 3497 * 3498 * This function assumes that the caller has acquired a u64_stats_sync lock.
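 *
 * The expected pattern is the one used by the two wrappers below, e.g.
 * (sketch of the Tx case):
 *
 *	u64_stats_update_begin(&tx_ring->ring_stats->syncp);
 *	ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
 *	u64_stats_update_end(&tx_ring->ring_stats->syncp);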
3499 */ 3500 static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes) 3501 { 3502 stats->bytes += bytes; 3503 stats->pkts += pkts; 3504 } 3505 3506 /** 3507 * ice_update_tx_ring_stats - Update Tx ring specific counters 3508 * @tx_ring: ring to update 3509 * @pkts: number of processed packets 3510 * @bytes: number of processed bytes 3511 */ 3512 void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes) 3513 { 3514 u64_stats_update_begin(&tx_ring->ring_stats->syncp); 3515 ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes); 3516 u64_stats_update_end(&tx_ring->ring_stats->syncp); 3517 } 3518 3519 /** 3520 * ice_update_rx_ring_stats - Update Rx ring specific counters 3521 * @rx_ring: ring to update 3522 * @pkts: number of processed packets 3523 * @bytes: number of processed bytes 3524 */ 3525 void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes) 3526 { 3527 u64_stats_update_begin(&rx_ring->ring_stats->syncp); 3528 ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes); 3529 u64_stats_update_end(&rx_ring->ring_stats->syncp); 3530 } 3531 3532 /** 3533 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used 3534 * @pi: port info of the switch with default VSI 3535 * 3536 * Return true if there is a single VSI in the default forwarding VSI list 3537 */ 3538 bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi) 3539 { 3540 bool exists = false; 3541 3542 ice_check_if_dflt_vsi(pi, 0, &exists); 3543 return exists; 3544 } 3545 3546 /** 3547 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI 3548 * @vsi: VSI to compare against default forwarding VSI 3549 * 3550 * If the VSI passed in is the default forwarding VSI then return true, else 3551 * return false 3552 */ 3553 bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi) 3554 { 3555 return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL); 3556 } 3557 3558 /** 3559 * ice_set_dflt_vsi - set the default forwarding VSI 3560 * @vsi: VSI getting set as the default forwarding VSI on the switch 3561 * 3562 * If the VSI passed in is already the default VSI and it's enabled, just 3563 * return success. 3564 * 3565 * Otherwise try to set the VSI passed in as the switch's default VSI and 3566 * return the result. 3567 */ 3568 int ice_set_dflt_vsi(struct ice_vsi *vsi) 3569 { 3570 struct device *dev; 3571 int status; 3572 3573 if (!vsi) 3574 return -EINVAL; 3575 3576 dev = ice_pf_to_dev(vsi->back); 3577 3578 /* the VSI passed in is already the default VSI */ 3579 if (ice_is_vsi_dflt_vsi(vsi)) { 3580 dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n", 3581 vsi->vsi_num); 3582 return 0; 3583 } 3584 3585 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX); 3586 if (status) { 3587 dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n", 3588 vsi->vsi_num, status); 3589 return status; 3590 } 3591 3592 return 0; 3593 } 3594 3595 /** 3596 * ice_clear_dflt_vsi - clear the default forwarding VSI 3597 * @vsi: VSI to remove from filter list 3598 * 3599 * If the switch has no default VSI or it's not enabled then return an error. 3600 * 3601 * Otherwise try to clear the default VSI and return the result.
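 *
 * This is the counterpart of ice_set_dflt_vsi() above; the driver uses the
 * pair, e.g., when toggling promiscuous mode on the PF, to start and stop
 * receiving all otherwise-unmatched switch traffic on a VSI.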
3602 */ 3603 int ice_clear_dflt_vsi(struct ice_vsi *vsi) 3604 { 3605 struct device *dev; 3606 int status; 3607 3608 if (!vsi) 3609 return -EINVAL; 3610 3611 dev = ice_pf_to_dev(vsi->back); 3612 3613 /* there is no default VSI configured */ 3614 if (!ice_is_dflt_vsi_in_use(vsi->port_info)) 3615 return -ENODEV; 3616 3617 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false, 3618 ICE_FLTR_RX); 3619 if (status) { 3620 dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n", 3621 vsi->vsi_num, status); 3622 return -EIO; 3623 } 3624 3625 return 0; 3626 } 3627 3628 /** 3629 * ice_get_link_speed_mbps - get link speed in Mbps 3630 * @vsi: the VSI whose link speed is being queried 3631 * 3632 * Return current VSI link speed and 0 if the speed is unknown. 3633 */ 3634 int ice_get_link_speed_mbps(struct ice_vsi *vsi) 3635 { 3636 unsigned int link_speed; 3637 3638 link_speed = vsi->port_info->phy.link_info.link_speed; 3639 3640 return (int)ice_get_link_speed(fls(link_speed) - 1); 3641 } 3642 3643 /** 3644 * ice_get_link_speed_kbps - get link speed in Kbps 3645 * @vsi: the VSI whose link speed is being queried 3646 * 3647 * Return current VSI link speed and 0 if the speed is unknown. 3648 */ 3649 int ice_get_link_speed_kbps(struct ice_vsi *vsi) 3650 { 3651 int speed_mbps; 3652 3653 speed_mbps = ice_get_link_speed_mbps(vsi); 3654 3655 return speed_mbps * 1000; 3656 } 3657 3658 /** 3659 * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate 3660 * @vsi: VSI to be configured 3661 * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit 3662 * 3663 * If the min_tx_rate is specified as 0 that means to clear the minimum BW limit 3664 * profile, otherwise a non-zero value will force a minimum BW limit for the VSI 3665 * on TC 0. 
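 *
 * Rates are in Kbps, so, e.g., min_tx_rate = 1000000 requests a 1 Gbps
 * floor on TC 0, while min_tx_rate = 0 removes the floor.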
3666 */ 3667 int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) 3668 { 3669 struct ice_pf *pf = vsi->back; 3670 struct device *dev; 3671 int status; 3672 int speed; 3673 3674 dev = ice_pf_to_dev(pf); 3675 if (!vsi->port_info) { 3676 dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n", 3677 vsi->idx, vsi->type); 3678 return -EINVAL; 3679 } 3680 3681 speed = ice_get_link_speed_kbps(vsi); 3682 if (min_tx_rate > (u64)speed) { 3683 dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n", 3684 min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, 3685 speed); 3686 return -EINVAL; 3687 } 3688 3689 /* Configure min BW for VSI limit */ 3690 if (min_tx_rate) { 3691 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, 3692 ICE_MIN_BW, min_tx_rate); 3693 if (status) { 3694 dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n", 3695 min_tx_rate, ice_vsi_type_str(vsi->type), 3696 vsi->idx); 3697 return status; 3698 } 3699 3700 dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n", 3701 min_tx_rate, ice_vsi_type_str(vsi->type)); 3702 } else { 3703 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, 3704 vsi->idx, 0, 3705 ICE_MIN_BW); 3706 if (status) { 3707 dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n", 3708 ice_vsi_type_str(vsi->type), vsi->idx); 3709 return status; 3710 } 3711 3712 dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n", 3713 ice_vsi_type_str(vsi->type), vsi->idx); 3714 } 3715 3716 return 0; 3717 } 3718 3719 /** 3720 * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate 3721 * @vsi: VSI to be configured 3722 * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit 3723 * 3724 * If the max_tx_rate is specified as 0 that means to clear the maximum BW limit 3725 * profile, otherwise a non-zero value will force a maximum BW limit for the VSI 3726 * on TC 0. 
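 *
 * For example, max_tx_rate = 100000 caps the VSI at 100 Mbps on TC 0, and
 * max_tx_rate = 0 removes the cap; the requested rate is validated against
 * the current link speed below.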
3727 */ 3728 int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) 3729 { 3730 struct ice_pf *pf = vsi->back; 3731 struct device *dev; 3732 int status; 3733 int speed; 3734 3735 dev = ice_pf_to_dev(pf); 3736 if (!vsi->port_info) { 3737 dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n", 3738 vsi->idx, vsi->type); 3739 return -EINVAL; 3740 } 3741 3742 speed = ice_get_link_speed_kbps(vsi); 3743 if (max_tx_rate > (u64)speed) { 3744 dev_err(dev, "max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n", 3745 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, 3746 speed); 3747 return -EINVAL; 3748 } 3749 3750 /* Configure max BW for VSI limit */ 3751 if (max_tx_rate) { 3752 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, 3753 ICE_MAX_BW, max_tx_rate); 3754 if (status) { 3755 dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n", 3756 max_tx_rate, ice_vsi_type_str(vsi->type), 3757 vsi->idx); 3758 return status; 3759 } 3760 3761 dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n", 3762 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); 3763 } else { 3764 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, 3765 vsi->idx, 0, 3766 ICE_MAX_BW); 3767 if (status) { 3768 dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n", 3769 ice_vsi_type_str(vsi->type), vsi->idx); 3770 return status; 3771 } 3772 3773 dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n", 3774 ice_vsi_type_str(vsi->type), vsi->idx); 3775 } 3776 3777 return 0; 3778 } 3779 3780 /** 3781 * ice_set_link - turn on/off physical link 3782 * @vsi: VSI to modify physical link on 3783 * @ena: turn on/off physical link 3784 */ 3785 int ice_set_link(struct ice_vsi *vsi, bool ena) 3786 { 3787 struct device *dev = ice_pf_to_dev(vsi->back); 3788 struct ice_port_info *pi = vsi->port_info; 3789 struct ice_hw *hw = pi->hw; 3790 int status; 3791 3792 if (vsi->type != ICE_VSI_PF) 3793 return -EINVAL; 3794 3795 status = ice_aq_set_link_restart_an(pi, ena, NULL); 3796 3797 /* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE. 3798 * this is not a fatal error, so print a warning message and return 3799 * a success code. Return an error if FW returns an error code other 3800 * than ICE_AQ_RC_EMODE 3801 */ 3802 if (status == -EIO) { 3803 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3804 dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n", 3805 (ena ? "ON" : "OFF"), status, 3806 ice_aq_str(hw->adminq.sq_last_status)); 3807 } else if (status) { 3808 dev_err(dev, "can't set link to %s, err %d aq_err %s\n", 3809 (ena ? "ON" : "OFF"), status, 3810 ice_aq_str(hw->adminq.sq_last_status)); 3811 return status; 3812 } 3813 3814 return 0; 3815 } 3816 3817 /** 3818 * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI 3819 * @vsi: VSI used to add VLAN filters 3820 * 3821 * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are based 3822 * on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8) doesn't 3823 * matter. In Double VLAN Mode (DVM), outer/single VLAN filters via 3824 * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID. 3825 * 3826 * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic 3827 * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged 3828 * traffic in SVM, since the VLAN TPID isn't part of filtering.
3829 * 3830 * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be 3831 * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is 3832 * part of filtering. 3833 */ 3834 int ice_vsi_add_vlan_zero(struct ice_vsi *vsi) 3835 { 3836 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 3837 struct ice_vlan vlan; 3838 int err; 3839 3840 vlan = ICE_VLAN(0, 0, 0); 3841 err = vlan_ops->add_vlan(vsi, &vlan); 3842 if (err && err != -EEXIST) 3843 return err; 3844 3845 /* in SVM both VLAN 0 filters are identical */ 3846 if (!ice_is_dvm_ena(&vsi->back->hw)) 3847 return 0; 3848 3849 vlan = ICE_VLAN(ETH_P_8021Q, 0, 0); 3850 err = vlan_ops->add_vlan(vsi, &vlan); 3851 if (err && err != -EEXIST) 3852 return err; 3853 3854 return 0; 3855 } 3856 3857 /** 3858 * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI 3859 * @vsi: VSI used to add VLAN filters 3860 * 3861 * Delete the VLAN 0 filters in the same manner that they were added in 3862 * ice_vsi_add_vlan_zero. 3863 */ 3864 int ice_vsi_del_vlan_zero(struct ice_vsi *vsi) 3865 { 3866 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 3867 struct ice_vlan vlan; 3868 int err; 3869 3870 vlan = ICE_VLAN(0, 0, 0); 3871 err = vlan_ops->del_vlan(vsi, &vlan); 3872 if (err && err != -EEXIST) 3873 return err; 3874 3875 /* in SVM both VLAN 0 filters are identical */ 3876 if (!ice_is_dvm_ena(&vsi->back->hw)) 3877 return 0; 3878 3879 vlan = ICE_VLAN(ETH_P_8021Q, 0, 0); 3880 err = vlan_ops->del_vlan(vsi, &vlan); 3881 if (err && err != -EEXIST) 3882 return err; 3883 3884 /* when deleting the last VLAN filter, make sure to disable the VLAN 3885 * promisc mode so the filter isn't left by accident 3886 */ 3887 return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, 3888 ICE_MCAST_VLAN_PROMISC_BITS, 0); 3889 } 3890 3891 /** 3892 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode 3893 * @vsi: VSI used to get the VLAN mode 3894 * 3895 * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled 3896 * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details. 
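 *
 * The helpers below subtract this count from vsi->num_vlan so the implicit
 * VLAN 0 filters are excluded when reporting how many real VLANs a VSI has.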

/**
 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
 * @vsi: VSI used to get the VLAN mode
 *
 * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
 * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details.
 */
static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
{
#define ICE_DVM_NUM_ZERO_VLAN_FLTRS	2
#define ICE_SVM_NUM_ZERO_VLAN_FLTRS	1
	/* no VLAN 0 filter is created when a port VLAN is active */
	if (vsi->type == ICE_VSI_VF) {
		if (WARN_ON(!vsi->vf))
			return 0;

		if (ice_vf_is_port_vlan_ena(vsi->vf))
			return 0;
	}

	if (ice_is_dvm_ena(&vsi->back->hw))
		return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
	else
		return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
}

/**
 * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
 * @vsi: VSI used to determine if any non-zero VLANs have been added
 */
bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
{
	return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
}

/**
 * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
 * @vsi: VSI used to get the number of non-zero VLANs added
 */
u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
{
	return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
}

/**
 * ice_is_feature_supported - check if a feature is supported by the PF
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to be checked
 *
 * Returns true if the feature is supported, false otherwise.
 */
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return false;

	return test_bit(f, pf->features);
}

/**
 * ice_set_feature_support - mark a feature as supported
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to set
 */
void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return;

	set_bit(f, pf->features);
}

/**
 * ice_clear_feature_support - mark a feature as not supported
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to clear
 */
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return;

	clear_bit(f, pf->features);
}

/**
 * ice_init_feature_support - set the features supported by the device
 * @pf: pointer to the struct ice_pf instance
 *
 * Called during init to set up the features supported by the device.
 */
void ice_init_feature_support(struct ice_pf *pf)
{
	switch (pf->hw.device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
		ice_set_feature_support(pf, ICE_F_DSCP);
		ice_set_feature_support(pf, ICE_F_PTP_EXTTS);
		if (ice_is_e810t(&pf->hw)) {
			ice_set_feature_support(pf, ICE_F_SMA_CTRL);
			if (ice_gnss_is_gps_present(&pf->hw))
				ice_set_feature_support(pf, ICE_F_GNSS);
		}
		break;
	default:
		break;
	}
}

/**
 * ice_vsi_update_security - update security block in VSI
 * @vsi: pointer to VSI structure
 * @fill: function pointer to fill ctx
 */
int
ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
{
	struct ice_vsi_ctx ctx = { 0 };

	ctx.info = vsi->info;
	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	fill(&ctx);

	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
		return -ENODEV;

	vsi->info = ctx.info;
	return 0;
}
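
/*
 * Usage sketch (illustrative only): ice_vsi_update_security() takes a fill
 * callback so callers can flip individual security flags without open-coding
 * the VSI update. With the setters defined below, MAC anti-spoof can be
 * toggled like this:
 *
 *	err = ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
 *	(...)
 *	err = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
 *
 * Only the security section is marked valid, so the other VSI properties are
 * left untouched by the update.
 */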

/**
 * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
			       (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}

/**
 * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
			       ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}

/**
 * ice_vsi_ctx_set_allow_override - allow destination override on VSI
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
}

/**
 * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
}

/**
 * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
 * @vsi: pointer to VSI structure
 * @set: set or unset the bit
 */
int
ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
{
	struct ice_vsi_ctx ctx = {
		.info = vsi->info,
	};

	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
	if (set)
		ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
	else
		ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;

	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
		return -ENODEV;

	vsi->info = ctx.info;
	return 0;
}
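
/*
 * Usage sketch (illustrative only): the sw-section update in
 * ice_vsi_update_local_lb() mirrors the security-section pattern of
 * ice_vsi_update_security() above, with the boolean selecting whether the
 * local loopback bit is set or cleared. The vsi variable and the surrounding
 * error handling are assumptions made for the example.
 *
 *	int err = ice_vsi_update_local_lb(vsi, true);
 *	if (err)
 *		return err;
 *	(...)
 *	err = ice_vsi_update_local_lb(vsi, false);
 */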