// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_devlink.h"
#include "ice_vsi_vlan_ops.h"

/**
 * ice_vsi_type_str - maps VSI type enum to string equivalents
 * @vsi_type: VSI type enum
 */
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
{
	switch (vsi_type) {
	case ICE_VSI_PF:
		return "ICE_VSI_PF";
	case ICE_VSI_VF:
		return "ICE_VSI_VF";
	case ICE_VSI_CTRL:
		return "ICE_VSI_CTRL";
	case ICE_VSI_CHNL:
		return "ICE_VSI_CHNL";
	case ICE_VSI_LB:
		return "ICE_VSI_LB";
	case ICE_VSI_SWITCHDEV_CTRL:
		return "ICE_VSI_SWITCHDEV_CTRL";
	default:
		return "unknown";
	}
}

/**
 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 *
 * First enable/disable all of the Rx rings, flush any remaining writes, and
 * then verify that they have all been enabled/disabled successfully. This will
 * let all of the register writes complete when enabling/disabling the Rx rings
 * before waiting for the change in hardware to complete.
 */
static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
	int ret = 0;
	u16 i;

	ice_for_each_rxq(vsi, i)
		ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);

	ice_flush(&vsi->back->hw);

	ice_for_each_rxq(vsi, i) {
		ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
		if (ret)
			break;
	}

	return ret;
}
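/* Illustrative usage sketch (editorial, not part of the driver): a caller
 * that wants to bring all Rx rings up and surface a failure might do
 *
 *	err = ice_vsi_ctrl_all_rx_rings(vsi, true);
 *	if (err)
 *		dev_err(ice_pf_to_dev(vsi->back),
 *			"failed to enable Rx rings, err %d\n", err);
 *
 * The batched writes plus a single ice_flush() above keep the per-ring
 * register writes posted before any of the (slow) hardware polls begin.
 */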
/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (vsi->type == ICE_VSI_CHNL)
		return 0;

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
				     sizeof(*vsi->tx_rings), GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;

	vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
				     sizeof(*vsi->rx_rings), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rings;

	/* txq_map needs to have enough space to track both Tx (stack) rings
	 * and XDP rings; at this point vsi->num_xdp_txq might not be set,
	 * so use num_possible_cpus() as we want to always provide XDP ring
	 * per CPU, regardless of queue count settings from user that might
	 * have come from ethtool's set_channels() callback;
	 */
	vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
				    sizeof(*vsi->txq_map), GFP_KERNEL);

	if (!vsi->txq_map)
		goto err_txq_map;

	vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
				    sizeof(*vsi->rxq_map), GFP_KERNEL);
	if (!vsi->rxq_map)
		goto err_rxq_map;

	/* There is no need to allocate q_vectors for a loopback VSI. */
	if (vsi->type == ICE_VSI_LB)
		return 0;

	/* allocate memory for q_vector pointers */
	vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
				      sizeof(*vsi->q_vectors), GFP_KERNEL);
	if (!vsi->q_vectors)
		goto err_vectors;

	vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
	if (!vsi->af_xdp_zc_qps)
		goto err_zc_qps;

	return 0;

err_zc_qps:
	devm_kfree(dev, vsi->q_vectors);
err_vectors:
	devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
	devm_kfree(dev, vsi->txq_map);
err_txq_map:
	devm_kfree(dev, vsi->rx_rings);
err_rings:
	devm_kfree(dev, vsi->tx_rings);
	return -ENOMEM;
}

/**
 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
	switch (vsi->type) {
	case ICE_VSI_PF:
	case ICE_VSI_SWITCHDEV_CTRL:
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
		/* a user could change the values of num_[tr]x_desc using
		 * ethtool -G so we should keep those values instead of
		 * overwriting them with the defaults.
		 */
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
		break;
	default:
		dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
			vsi->type);
		break;
	}
}
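/* For reference, the user-set ring sizes preserved above come from ethtool,
 * e.g. "ethtool -G <iface> rx 2048 tx 2048" (interface name illustrative);
 * fields still at zero fall back to ICE_DFLT_NUM_{RX,TX}_DESC.
 */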
/**
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
{
	enum ice_vsi_type vsi_type = vsi->type;
	struct ice_pf *pf = vsi->back;
	struct ice_vf *vf = vsi->vf;

	if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
		return;

	switch (vsi_type) {
	case ICE_VSI_PF:
		if (vsi->req_txq) {
			vsi->alloc_txq = vsi->req_txq;
			vsi->num_txq = vsi->req_txq;
		} else {
			vsi->alloc_txq = min3(pf->num_lan_msix,
					      ice_get_avail_txq_count(pf),
					      (u16)num_online_cpus());
		}

		pf->num_lan_tx = vsi->alloc_txq;

		/* only 1 Rx queue unless RSS is enabled */
		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			vsi->alloc_rxq = 1;
		} else {
			if (vsi->req_rxq) {
				vsi->alloc_rxq = vsi->req_rxq;
				vsi->num_rxq = vsi->req_rxq;
			} else {
				vsi->alloc_rxq = min3(pf->num_lan_msix,
						      ice_get_avail_rxq_count(pf),
						      (u16)num_online_cpus());
			}
		}

		pf->num_lan_rx = vsi->alloc_rxq;

		vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
					   max_t(int, vsi->alloc_rxq,
						 vsi->alloc_txq));
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
		/* The number of queues for ctrl VSI is equal to number of VFs.
		 * Each ring is associated to the corresponding VF_PR netdev.
		 */
		vsi->alloc_txq = ice_get_num_vfs(pf);
		vsi->alloc_rxq = vsi->alloc_txq;
		vsi->num_q_vectors = 1;
		break;
	case ICE_VSI_VF:
		if (vf->num_req_qs)
			vf->num_vf_qs = vf->num_req_qs;
		vsi->alloc_txq = vf->num_vf_qs;
		vsi->alloc_rxq = vf->num_vf_qs;
		/* pf->vfs.num_msix_per includes (VF miscellaneous vector +
		 * data queue interrupts). Since vsi->num_q_vectors is the
		 * number of queue vectors, subtract 1 (ICE_NONQ_VECS_VF)
		 * from the original vector count
		 */
		vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF;
		break;
	case ICE_VSI_CTRL:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		vsi->num_q_vectors = 1;
		break;
	case ICE_VSI_CHNL:
		vsi->alloc_txq = 0;
		vsi->alloc_rxq = 0;
		break;
	case ICE_VSI_LB:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		break;
	default:
		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
		break;
	}

	ice_vsi_set_num_desc(vsi);
}

/**
 * ice_get_free_slot - get the next free (NULL) location index in array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use this
 * function on any array of pointers.
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}
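/* Usage sketch (this mirrors ice_vsi_alloc() later in this file): after a
 * slot is filled, the free-slot hint is refreshed with
 *
 *	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
 *					 pf->next_vsi);
 *
 * ICE_NO_VSI is returned once every slot is occupied.
 */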
/**
 * ice_vsi_delete_from_hw - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx *ctxt;
	int status;

	ice_fltr_remove_all(vsi);
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return;

	if (vsi->type == ICE_VSI_VF)
		ctxt->vf_num = vsi->vf->vf_id;
	ctxt->vsi_num = vsi->vsi_num;

	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));

	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
	if (status)
		dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
			vsi->vsi_num, status);

	kfree(ctxt);
}

/**
 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
 * @vsi: pointer to VSI being cleared
 */
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	if (vsi->af_xdp_zc_qps) {
		bitmap_free(vsi->af_xdp_zc_qps);
		vsi->af_xdp_zc_qps = NULL;
	}
	/* free the ring and vector containers */
	if (vsi->q_vectors) {
		devm_kfree(dev, vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	if (vsi->tx_rings) {
		devm_kfree(dev, vsi->tx_rings);
		vsi->tx_rings = NULL;
	}
	if (vsi->rx_rings) {
		devm_kfree(dev, vsi->rx_rings);
		vsi->rx_rings = NULL;
	}
	if (vsi->txq_map) {
		devm_kfree(dev, vsi->txq_map);
		vsi->txq_map = NULL;
	}
	if (vsi->rxq_map) {
		devm_kfree(dev, vsi->rxq_map);
		vsi->rxq_map = NULL;
	}
}

/**
 * ice_vsi_free_stats - Free the ring statistics structures
 * @vsi: VSI pointer
 */
static void ice_vsi_free_stats(struct ice_vsi *vsi)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf = vsi->back;
	int i;

	if (vsi->type == ICE_VSI_CHNL)
		return;
	if (!pf->vsi_stats)
		return;

	vsi_stat = pf->vsi_stats[vsi->idx];
	if (!vsi_stat)
		return;

	ice_for_each_alloc_txq(vsi, i) {
		if (vsi_stat->tx_ring_stats[i]) {
			kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
			WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
		}
	}

	ice_for_each_alloc_rxq(vsi, i) {
		if (vsi_stat->rx_ring_stats[i]) {
			kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
			WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
		}
	}

	kfree(vsi_stat->tx_ring_stats);
	kfree(vsi_stat->rx_ring_stats);
	kfree(vsi_stat);
	pf->vsi_stats[vsi->idx] = NULL;
}

/**
 * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
 * @vsi: VSI which is having stats allocated
 */
static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
{
	struct ice_ring_stats **tx_ring_stats;
	struct ice_ring_stats **rx_ring_stats;
	struct ice_vsi_stats *vsi_stats;
	struct ice_pf *pf = vsi->back;
	u16 i;

	vsi_stats = pf->vsi_stats[vsi->idx];
	tx_ring_stats = vsi_stats->tx_ring_stats;
	rx_ring_stats = vsi_stats->rx_ring_stats;

	/* Allocate Tx ring stats */
	ice_for_each_alloc_txq(vsi, i) {
		struct ice_ring_stats *ring_stats;
		struct ice_tx_ring *ring;

		ring = vsi->tx_rings[i];
		ring_stats = tx_ring_stats[i];

		if (!ring_stats) {
			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
			if (!ring_stats)
				goto err_out;

			WRITE_ONCE(tx_ring_stats[i], ring_stats);
		}

		ring->ring_stats = ring_stats;
	}

	/* Allocate Rx ring stats */
	ice_for_each_alloc_rxq(vsi, i) {
		struct ice_ring_stats *ring_stats;
		struct ice_rx_ring *ring;

		ring = vsi->rx_rings[i];
		ring_stats = rx_ring_stats[i];

		if (!ring_stats) {
			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
			if (!ring_stats)
				goto err_out;

			WRITE_ONCE(rx_ring_stats[i], ring_stats);
		}

		ring->ring_stats = ring_stats;
	}

	return 0;

err_out:
	ice_vsi_free_stats(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_free - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
 */
static void ice_vsi_free(struct ice_vsi *vsi)
{
	struct ice_pf *pf = NULL;
	struct device *dev;

	if (!vsi || !vsi->back)
		return;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
		return;
	}

	mutex_lock(&pf->sw_mutex);
	/* updates the PF for this cleared VSI */

	pf->vsi[vsi->idx] = NULL;
	pf->next_vsi = vsi->idx;

	ice_vsi_free_stats(vsi);
	ice_vsi_free_arrays(vsi);
	mutex_unlock(&pf->sw_mutex);
	devm_kfree(dev, vsi);
}

void ice_vsi_delete(struct ice_vsi *vsi)
{
	ice_vsi_delete_from_hw(vsi);
	ice_vsi_free(vsi);
}

/**
 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.tx_ring)
		return IRQ_HANDLED;

#define FDIR_RX_DESC_CLEAN_BUDGET 64
	ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
	ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);

	return IRQ_HANDLED;
}

/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
		return IRQ_HANDLED;

	q_vector->total_events++;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
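/* Editorial note: the hard-IRQ handler above intentionally does no ring work
 * itself; it only counts the event and schedules the q_vector's NAPI context,
 * where the actual Tx/Rx cleanup runs. The ctrl-VSI handler is the exception,
 * cleaning its flow-director rings inline against a fixed
 * FDIR_RX_DESC_CLEAN_BUDGET instead of deferring to NAPI.
 */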
static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
	struct ice_pf *pf = q_vector->vsi->back;
	struct ice_vf *vf;
	unsigned int bkt;

	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
		return IRQ_HANDLED;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		napi_schedule(&vf->repr->q_vector->napi);
	rcu_read_unlock();

	return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
 * @vsi: VSI pointer
 */
static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf = vsi->back;

	if (vsi->type == ICE_VSI_CHNL)
		return 0;
	if (!pf->vsi_stats)
		return -ENOENT;

	if (pf->vsi_stats[vsi->idx])
		/* realloc will happen in rebuild path */
		return 0;

	vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
	if (!vsi_stat)
		return -ENOMEM;

	vsi_stat->tx_ring_stats =
		kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
			GFP_KERNEL);
	if (!vsi_stat->tx_ring_stats)
		goto err_alloc_tx;

	vsi_stat->rx_ring_stats =
		kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
			GFP_KERNEL);
	if (!vsi_stat->rx_ring_stats)
		goto err_alloc_rx;

	pf->vsi_stats[vsi->idx] = vsi_stat;

	return 0;

err_alloc_rx:
	kfree(vsi_stat->rx_ring_stats);
err_alloc_tx:
	kfree(vsi_stat->tx_ring_stats);
	kfree(vsi_stat);
	pf->vsi_stats[vsi->idx] = NULL;
	return -ENOMEM;
}

/**
 * ice_vsi_alloc_def - set default values for already allocated VSI
 * @vsi: ptr to VSI
 * @ch: ptr to channel
 */
static int
ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
{
	if (vsi->type != ICE_VSI_CHNL) {
		ice_vsi_set_num_qs(vsi);
		if (ice_vsi_alloc_arrays(vsi))
			return -ENOMEM;
	}

	switch (vsi->type) {
	case ICE_VSI_SWITCHDEV_CTRL:
		/* Setup eswitch MSIX irq handler for VSI */
		vsi->irq_handler = ice_eswitch_msix_clean_rings;
		break;
	case ICE_VSI_PF:
		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	case ICE_VSI_CTRL:
		/* Setup ctrl VSI MSIX irq handler */
		vsi->irq_handler = ice_msix_clean_ctrl_vsi;
		break;
	case ICE_VSI_CHNL:
		if (!ch)
			return -EINVAL;

		vsi->num_rxq = ch->num_rxq;
		vsi->num_txq = ch->num_txq;
		vsi->next_base_q = ch->base_q;
		break;
	case ICE_VSI_VF:
	case ICE_VSI_LB:
		break;
	default:
		ice_vsi_free_arrays(vsi);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 *
 * Reserves a VSI index from the PF and allocates an empty VSI structure
 * without a type. The VSI structure must later be initialized by calling
 * ice_vsi_cfg().
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi = NULL;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->sw_mutex);

	/* If we have already allocated our maximum number of VSIs,
	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	 * is available to be populated
	 */
	if (pf->next_vsi == ICE_NO_VSI) {
		dev_dbg(dev, "out of VSI slots!\n");
		goto unlock_pf;
	}

	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
	if (!vsi)
		goto unlock_pf;

	vsi->back = pf;
	set_bit(ICE_VSI_DOWN, vsi->state);

	/* fill slot and make note of the index */
	vsi->idx = pf->next_vsi;
	pf->vsi[pf->next_vsi] = vsi;

	/* prepare pf->next_vsi for next use */
	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
					 pf->next_vsi);

unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;
}
/**
 * ice_alloc_fd_res - Allocate FD resource for a VSI
 * @vsi: pointer to the ice_vsi
 *
 * This allocates the FD resources
 *
 * Returns 0 on success, -EPERM on no-op or -EIO on failure
 */
static int ice_alloc_fd_res(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u32 g_val, b_val;

	/* Flow Director filters are only allocated/assigned to the PF, VF, or
	 * CHNL VSIs which pass the traffic. The CTRL VSI is only used to
	 * add/delete filters so resources are not allocated to it
	 */
	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EPERM;

	if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
	      vsi->type == ICE_VSI_CHNL))
		return -EPERM;

	/* FD filters from guaranteed pool per VSI */
	g_val = pf->hw.func_caps.fd_fltr_guar;
	if (!g_val)
		return -EPERM;

	/* FD filters from best effort pool */
	b_val = pf->hw.func_caps.fd_fltr_best_effort;
	if (!b_val)
		return -EPERM;

	/* PF main VSI gets only 64 FD resources from guaranteed pool
	 * when ADQ is configured.
	 */
#define ICE_PF_VSI_GFLTR	64

	/* determine FD filter resources per VSI from shared(best effort) and
	 * dedicated pool
	 */
	if (vsi->type == ICE_VSI_PF) {
		vsi->num_gfltr = g_val;
		/* if MQPRIO is configured, main VSI doesn't get all FD
		 * resources from guaranteed pool. PF VSI gets 64 FD resources
		 */
		if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
			if (g_val < ICE_PF_VSI_GFLTR)
				return -EPERM;
			/* allow bare minimum entries for PF VSI */
			vsi->num_gfltr = ICE_PF_VSI_GFLTR;
		}

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	} else if (vsi->type == ICE_VSI_VF) {
		vsi->num_gfltr = 0;

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	} else {
		struct ice_vsi *main_vsi;
		int numtc;

		main_vsi = ice_get_main_vsi(pf);
		if (!main_vsi)
			return -EPERM;

		if (!main_vsi->all_numtc)
			return -EINVAL;

		/* figure out ADQ numtc */
		numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;

		/* only one TC but still asking resources for channels,
		 * invalid config
		 */
		if (numtc < ICE_CHNL_START_TC)
			return -EPERM;

		g_val -= ICE_PF_VSI_GFLTR;
		/* channel VSIs get an equal share from the guaranteed pool */
		vsi->num_gfltr = g_val / numtc;

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	}

	return 0;
}
/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	struct ice_qs_cfg rx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_rxqs,
		.pf_map_size = pf->max_pf_rxqs,
		.q_count = vsi->alloc_rxq,
		.scatter_count = ICE_MAX_SCATTER_RXQS,
		.vsi_map = vsi->rxq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	int ret;

	if (vsi->type == ICE_VSI_CHNL)
		return 0;

	ret = __ice_vsi_get_qs(&tx_qs_cfg);
	if (ret)
		return ret;
	vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;

	ret = __ice_vsi_get_qs(&rx_qs_cfg);
	if (ret)
		return ret;
	vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;

	return 0;
}

/**
 * ice_vsi_put_qs - Release queues from VSI to PF
 * @vsi: the VSI that is going to release queues
 */
static void ice_vsi_put_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	mutex_lock(&pf->avail_q_mutex);

	ice_for_each_alloc_txq(vsi, i) {
		clear_bit(vsi->txq_map[i], pf->avail_txqs);
		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
	}

	ice_for_each_alloc_rxq(vsi, i) {
		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
	}

	mutex_unlock(&pf->avail_q_mutex);
}

/**
 * ice_is_safe_mode
 * @pf: pointer to the PF struct
 *
 * returns true if driver is in safe mode, false otherwise
 */
bool ice_is_safe_mode(struct ice_pf *pf)
{
	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}

/**
 * ice_is_rdma_ena
 * @pf: pointer to the PF struct
 *
 * returns true if RDMA is currently supported, false otherwise
 */
bool ice_is_rdma_ena(struct ice_pf *pf)
{
	return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}

/**
 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
 * @vsi: the VSI being cleaned up
 *
 * This function deletes RSS input set for all flows that were configured
 * for this VSI
 */
static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int status;

	if (ice_is_safe_mode(pf))
		return;

	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
			vsi->vsi_num, status);
}

/**
 * ice_rss_clean - Delete RSS related VSI structures and configuration
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	if (vsi->rss_hkey_user)
		devm_kfree(dev, vsi->rss_hkey_user);
	if (vsi->rss_lut_user)
		devm_kfree(dev, vsi->rss_lut_user);

	ice_vsi_clean_rss_flow_fld(vsi);
	/* remove RSS replay list */
	if (!ice_is_safe_mode(pf))
		ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
}
/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	switch (vsi->type) {
	case ICE_VSI_CHNL:
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = (u16)cap->rss_table_size;
		if (vsi->type == ICE_VSI_CHNL)
			vsi->rss_size = min_t(u16, vsi->num_rxq,
					      BIT(cap->rss_table_entry_width));
		else
			vsi->rss_size = min_t(u16, num_online_cpus(),
					      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
		vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
		vsi->rss_size = min_t(u16, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
		break;
	case ICE_VSI_VF:
		/* VF VSI will get a small RSS table.
		 * For VSI_LUT, LUT size should be set to 64 bytes.
		 */
		vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
		vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
		break;
	case ICE_VSI_LB:
		break;
	default:
		dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		break;
	}
}

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @hw: HW structure used to determine the VLAN mode of the device
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSIs should be allocated from shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* allow all untagged/tagged packets by default on Tx */
	ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL &
					ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >>
				       ICE_AQ_VSI_INNER_VLAN_TX_MODE_S);
	/* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
	 * results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
	 *
	 * DVM - leave inner VLAN in packet by default
	 */
	if (ice_is_dvm_ena(hw)) {
		ctxt->info.inner_vlan_flags |=
			ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
		ctxt->info.outer_vlan_flags =
			(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
			 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
			ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
		ctxt->info.outer_vlan_flags |=
			(ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
			 ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
			ICE_AQ_VSI_OUTER_TAG_TYPE_M;
		ctxt->info.outer_vlan_flags |=
			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
				   ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
	}
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No outer tag support; outer_tag_flags remains zero */
}
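/* Worked example (editorial, assuming the 3-bit-per-UP field layout implied
 * by ICE_UP_TABLE_TRANSLATE, with UP0 at bits 0-2, UP1 at bits 3-5, ...):
 * the identity mapping built above packs to
 *
 *	table = 0<<0 | 1<<3 | 2<<6 | 3<<9 | 4<<12 | 5<<15 | 6<<18 | 7<<21
 *	      = 0xFAC688
 *
 * i.e. every user priority translates to itself on both ingress and egress.
 */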
/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
	u16 num_txq_per_tc, num_rxq_per_tc;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	u8 netdev_tc = 0;
	int i;

	if (!vsi->tc_cfg.numtc) {
		/* at least TC0 should be enabled by default */
		vsi->tc_cfg.numtc = 1;
		vsi->tc_cfg.ena_tc = 1;
	}

	num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
	if (!num_rxq_per_tc)
		num_rxq_per_tc = 1;
	num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
	if (!num_txq_per_tc)
		num_txq_per_tc = 1;

	/* find the (rounded up) power-of-2 of qcount */
	pow = (u16)order_base_2(num_rxq_per_tc);

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are for queue offset for TC0, the next 4 bits for
	 * the number of queues allocated to TC0. The number of queues is a
	 * power-of-2.
	 *
	 * If TC is not enabled, the queue offset is set to 0 and one queue is
	 * allocated; this way, traffic for the given TC will be sent to the
	 * default queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */
	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
		vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			 ICE_AQ_VSI_TC_Q_NUM_M);
		offset += num_rxq_per_tc;
		tx_count += num_txq_per_tc;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* if offset is non-zero, it was computed from the enabled TCs for
	 * this VSI; otherwise fall back to num_rxq_per_tc, which is always
	 * correct and non-zero because it is derived from the VSI's
	 * allocated Rx queues, of which there is at least 1 (hence tx_count
	 * will be at least 1 as well)
	 */
	if (offset)
		rx_count = offset;
	else
		rx_count = num_rxq_per_tc;

	if (rx_count > vsi->alloc_rxq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
			rx_count, vsi->alloc_rxq);
		return -EINVAL;
	}

	if (tx_count > vsi->alloc_txq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
			tx_count, vsi->alloc_txq);
		return -EINVAL;
	}

	vsi->num_txq = tx_count;
	vsi->num_rxq = rx_count;

	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
		dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
		/* since there is a chance that num_rxq could have been changed
		 * in the above for loop, make num_txq equal to num_rxq.
		 */
		vsi->num_txq = vsi->num_rxq;
	}

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);

	return 0;
}
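/* Worked example (editorial): with 16 allocated Rx queues and two enabled
 * TCs, num_rxq_per_tc = 16 / 2 = 8 and pow = order_base_2(8) = 3, so the
 * loop above produces
 *
 *	tc_mapping[0]: queue offset 0, q_num field 3 (2^3 = 8 queues)
 *	tc_mapping[1]: queue offset 8, q_num field 3
 *
 * and rx_count ends up as 16, matching vsi->alloc_rxq.
 */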
/**
 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 dflt_q_group, dflt_q_prio;
	u16 dflt_q, report_q, val;

	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
	    vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)
		return;

	val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
	ctxt->info.valid_sections |= cpu_to_le16(val);
	dflt_q = 0;
	dflt_q_group = 0;
	report_q = 0;
	dflt_q_prio = 0;

	/* enable flow director filtering/programming */
	val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
	ctxt->info.fd_options = cpu_to_le16(val);
	/* max of allocated flow director filters */
	ctxt->info.max_fd_fltr_dedicated =
		cpu_to_le16(vsi->num_gfltr);
	/* max of shared flow director filters any VSI may program */
	ctxt->info.max_fd_fltr_shared =
		cpu_to_le16(vsi->num_bfltr);
	/* default queue index within the VSI of the default FD */
	val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) &
	       ICE_AQ_VSI_FD_DEF_Q_M);
	/* target queue or queue group to the FD filter */
	val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) &
		ICE_AQ_VSI_FD_DEF_GRP_M);
	ctxt->info.fd_def_q = cpu_to_le16(val);
	/* queue index on which FD filter completion is reported */
	val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) &
	       ICE_AQ_VSI_FD_REPORT_Q_M);
	/* priority of the default qindex action */
	val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) &
		ICE_AQ_VSI_FD_DEF_PRIORITY_M);
	ctxt->info.fd_report_opt = cpu_to_le16(val);
}
/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;
	struct device *dev;
	struct ice_pf *pf;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	switch (vsi->type) {
	case ICE_VSI_CHNL:
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_VF:
		/* VF VSI will get a small RSS table, which is a VSI LUT type */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	default:
		dev_dbg(dev, "Unsupported VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		return;
	}

	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
			       ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
				ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}

static void
ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	struct ice_pf *pf = vsi->back;
	u16 qcount, qmap;
	u8 offset = 0;
	int pow;

	qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);

	pow = order_base_2(qcount);
	qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
		ICE_AQ_VSI_TC_Q_OFFSET_M) |
		((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
		 ICE_AQ_VSI_TC_Q_NUM_M);

	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
	ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
}
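/* Editorial note: the open-coded shift/mask packing in ice_set_rss_vsi_ctx()
 * could equivalently be written with FIELD_PREP() from <linux/bitfield.h>
 * (illustrative only):
 *
 *	ctxt->info.q_opt_rss =
 *		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
 *		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
 *
 * This file already uses FIELD_PREP() that way in ice_set_dflt_vsi_ctx().
 */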
/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 * @vsi_flags: VSI configuration flags
 *
 * Set ICE_VSI_FLAG_INIT to initialize a new VSI context, clear it to
 * reconfigure an existing context.
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi_ctx *ctxt;
	struct device *dev;
	int ret = 0;

	dev = ice_pf_to_dev(pf);
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	switch (vsi->type) {
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
	case ICE_VSI_PF:
		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
	case ICE_VSI_CHNL:
		ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
		break;
	case ICE_VSI_VF:
		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
		/* VF number here is the absolute VF number (0-255) */
		ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
		break;
	default:
		ret = -ENODEV;
		goto out;
	}

	/* Handle VLAN pruning for channel VSI if main VSI has VLAN
	 * prune enabled
	 */
	if (vsi->type == ICE_VSI_CHNL) {
		struct ice_vsi *main_vsi;

		main_vsi = ice_get_main_vsi(pf);
		if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi))
			ctxt->info.sw_flags2 |=
				ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
		else
			ctxt->info.sw_flags2 &=
				~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

	ice_set_dflt_vsi_ctx(hw, ctxt);
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
		ice_set_fd_vsi_ctx(ctxt, vsi);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
	    vsi->type != ICE_VSI_CTRL) {
		ice_set_rss_vsi_ctx(ctxt, vsi);
		/* if updating VSI context, make sure to set valid_sections
		 * to indicate which section of the VSI context is being
		 * updated
		 */
		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
			ctxt->info.valid_sections |=
				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
	}

	ctxt->info.sw_id = vsi->port_info->sw_id;
	if (vsi->type == ICE_VSI_CHNL) {
		ice_chnl_vsi_setup_q_map(vsi, ctxt);
	} else {
		ret = ice_vsi_setup_q_map(vsi, ctxt);
		if (ret)
			goto out;

		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
			/* means VSI is being updated; must indicate which
			 * section of the VSI context is being modified
			 */
			ctxt->info.valid_sections |=
				cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	}

	/* Allow control frames out of main VSI */
	if (vsi->type == ICE_VSI_PF) {
		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	}

	if (vsi_flags & ICE_VSI_FLAG_INIT) {
		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Add VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	} else {
		ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Update VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt->info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt->vsi_num;

out:
	kfree(ctxt);
	return ret;
}
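/* Usage note (editorial): per the flag handling above, a caller creates a
 * brand-new context with ice_vsi_init(vsi, ICE_VSI_FLAG_INIT), while
 * ice_vsi_init(vsi, 0) re-issues the context as an update, setting the
 * relevant valid_sections bits so firmware only touches the modified
 * sections.
 */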
/**
 * ice_free_res - free a block of resources
 * @res: pointer to the resource
 * @index: starting index previously returned by ice_get_res
 * @id: identifier to track owner
 *
 * Returns the number of resources freed, or -EINVAL on invalid input
 */
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
{
	int count = 0;
	int i;

	if (!res || index >= res->end)
		return -EINVAL;

	id |= ICE_RES_VALID_BIT;
	for (i = index; i < res->end && res->list[i] == id; i++) {
		res->list[i] = 0;
		count++;
	}

	return count;
}

/**
 * ice_search_res - Search the tracker for a block of resources
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or -ENOMEM for error
 */
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
	u16 start = 0, end = 0;

	if (needed > res->end)
		return -ENOMEM;

	id |= ICE_RES_VALID_BIT;

	do {
		/* skip already allocated entries */
		if (res->list[end++] & ICE_RES_VALID_BIT) {
			start = end;
			if ((start + needed) > res->end)
				break;
		}

		if (end == (start + needed)) {
			int i = start;

			/* there was enough, so assign it to the requestor */
			while (i != end)
				res->list[i++] = id;

			return start;
		}
	} while (end < res->end);

	return -ENOMEM;
}

/**
 * ice_get_free_res_count - Get free count from a resource tracker
 * @res: Resource tracker instance
 */
static u16 ice_get_free_res_count(struct ice_res_tracker *res)
{
	u16 i, count = 0;

	for (i = 0; i < res->end; i++)
		if (!(res->list[i] & ICE_RES_VALID_BIT))
			count++;

	return count;
}

/**
 * ice_get_res - get a block of resources
 * @pf: board private structure
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or negative for error
 */
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
{
	if (!res || !pf)
		return -EINVAL;

	if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
		dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
			needed, res->num_entries, id);
		return -EINVAL;
	}

	return ice_search_res(res, needed, id);
}
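/* Worked example (editorial): for a tracker whose list looks like
 * [used, used, free, free, free, used, ...], ice_search_res(res, 3, id)
 * restarts its window at index 2 after skipping the two allocated entries,
 * finds indices 2-4 contiguous and free, stamps all three with
 * (id | ICE_RES_VALID_BIT), and returns 2 as the base of the block.
 */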
/**
 * ice_get_vf_ctrl_res - Get VF control VSI resource
 * @pf: pointer to the PF structure
 * @vsi: the VSI to allocate a resource for
 *
 * Look up whether another VF has already allocated the control VSI resource.
 * If so, re-use this resource so that we share it among all VFs.
 *
 * Otherwise, allocate the resource and return it.
 */
static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
{
	struct ice_vf *vf;
	unsigned int bkt;
	int base;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
			base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
			rcu_read_unlock();
			return base;
		}
	}
	rcu_read_unlock();

	return ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors,
			   ICE_RES_VF_CTRL_VEC_ID);
}
/**
 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after ice_vsi_alloc_def() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 */
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u16 num_q_vectors;
	int base;

	dev = ice_pf_to_dev(pf);
	/* SRIOV doesn't grab irq_tracker entries for each VSI */
	if (vsi->type == ICE_VSI_VF)
		return 0;
	if (vsi->type == ICE_VSI_CHNL)
		return 0;

	if (vsi->base_vector) {
		dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
			vsi->vsi_num, vsi->base_vector);
		return -EEXIST;
	}

	num_q_vectors = vsi->num_q_vectors;
	/* reserve slots from OS requested IRQs */
	if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
		base = ice_get_vf_ctrl_res(pf, vsi);
	} else {
		base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
				   vsi->idx);
	}

	if (base < 0) {
		dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
			ice_get_free_res_count(pf->irq_tracker),
			ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors);
		return -ENOENT;
	}
	vsi->base_vector = (u16)base;
	pf->num_avail_sw_msix -= num_q_vectors;

	return 0;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	/* Avoid stale references by clearing map from vector to ring */
	if (vsi->q_vectors) {
		ice_for_each_q_vector(vsi, i) {
			struct ice_q_vector *q_vector = vsi->q_vectors[i];

			if (q_vector) {
				q_vector->tx.tx_ring = NULL;
				q_vector->rx.rx_ring = NULL;
			}
		}
	}

	if (vsi->tx_rings) {
		ice_for_each_alloc_txq(vsi, i) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				WRITE_ONCE(vsi->tx_rings[i], NULL);
			}
		}
	}
	if (vsi->rx_rings) {
		ice_for_each_alloc_rxq(vsi, i) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				WRITE_ONCE(vsi->rx_rings[i], NULL);
			}
		}
	}
}
/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u16 i;

	dev = ice_pf_to_dev(pf);
	/* Allocate Tx rings */
	ice_for_each_alloc_txq(vsi, i) {
		struct ice_tx_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);

		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->vsi = vsi;
		ring->tx_tstamps = &pf->ptp.port.tx;
		ring->dev = dev;
		ring->count = vsi->num_tx_desc;
		ring->txq_teid = ICE_INVAL_TEID;
		if (dvm_ena)
			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
		else
			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
		WRITE_ONCE(vsi->tx_rings[i], ring);
	}

	/* Allocate Rx rings */
	ice_for_each_alloc_rxq(vsi, i) {
		struct ice_rx_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = dev;
		ring->count = vsi->num_rx_desc;
		ring->cached_phctime = pf->ptp.cached_phc_time;
		WRITE_ONCE(vsi->rx_rings[i], ring);
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_manage_rss_lut - disable/enable RSS
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 *
 * In the event of disable request for RSS, this function will zero out RSS
 * LUT, while in the event of enable request for RSS, it will reconfigure RSS
 * LUT.
 */
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
	u8 *lut;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return;

	if (ena) {
		if (vsi->rss_lut_user)
			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
		else
			ice_fill_rss_lut(lut, vsi->rss_table_size,
					 vsi->rss_size);
	}

	ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	kfree(lut);
}
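/* Editorial note: on disable the LUT written above stays all-zero (the
 * kzalloc'ed buffer is left untouched), steering every hash result to
 * queue 0; on enable, ice_fill_rss_lut() spreads LUT entries round-robin
 * across rss_size queues (per its implementation elsewhere in the driver),
 * e.g. 0, 1, 2, 3, 0, 1, ... for rss_size == 4, unless a user-supplied LUT
 * (rss_lut_user) is restored instead.
 */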
/**
 * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
 * @vsi: VSI to be configured
 * @disable: set to true to have FCS / CRC in the frame data
 */
void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
{
	int i;

	ice_for_each_rxq(vsi, i)
		if (disable)
			vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
		else
			vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
}

/**
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
 * @vsi: VSI to be configured
 */
int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u8 *lut, *key;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
	    (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
	} else {
		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);

		/* If orig_rss_size is valid and it is less than determined
		 * main VSI's rss_size, update main VSI's rss_size to be
		 * orig_rss_size so that when tc-qdisc is deleted, main VSI
		 * RSS table gets programmed to be correct (whatever it was
		 * to begin with, prior to setup-tc for ADQ config)
		 */
		if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
		    vsi->orig_rss_size <= vsi->num_rxq) {
			vsi->rss_size = vsi->orig_rss_size;
			/* now orig_rss_size is used, reset it to zero */
			vsi->orig_rss_size = 0;
		}
	}

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);

	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	if (err) {
		dev_err(dev, "set_rss_lut failed, error %d\n", err);
		goto ice_vsi_cfg_rss_exit;
	}

	key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto ice_vsi_cfg_rss_exit;
	}

	if (vsi->rss_hkey_user)
		memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
	else
		netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);

	err = ice_set_rss_key(vsi, key);
	if (err)
		dev_err(dev, "set_rss_key failed, error %d\n", err);

	kfree(key);
ice_vsi_cfg_rss_exit:
	kfree(lut);
	return err;
}

/**
 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called during the VF VSI setup. Upon successful
 * completion of package download, this function will configure default RSS
 * input sets for VF VSI.
 */
static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi->vsi_num);
		return;
	}

	status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
	if (status)
		dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
			vsi->vsi_num, status);
}
/**
 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called after successful download package call
 * during initialization of PF. Since the downloaded package will erase the
 * RSS section, this function will configure RSS input sets for different
 * flow types. The last profile added has the highest priority, therefore 2
 * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles
 * (i.e. IPv4 src/dst TCP src/dst port).
 */
static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
{
	u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int status;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi_num);
		return;
	}
	/* configure RSS for IPv4 with input set IP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
				 ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for IPv6 with input set IPv6 src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
				 ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
				 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
				 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for sctp4 with input set IP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
				 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
				 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
				 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for sctp6 with input set IPv6 src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
				 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
				 ICE_FLOW_SEG_HDR_ESP);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
			vsi_num, status);
}

/**
 * ice_pf_state_is_nominal - checks the PF for nominal state
 * @pf: pointer to PF to check
 *
 * Check the PF's state for a collection of bits that would indicate
 * the PF is in a state that would inhibit normal operation for
 * driver functionality.
 *
 * Returns true if PF is in a nominal state, false otherwise
 */
bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };

	if (!pf)
		return false;

	bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
	if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
		return false;

	return true;
}
ICE_MAX_FRAME_LEGACY_RX; 1967 vsi->rx_buf_len = ICE_RXBUF_1664; 1968 #if (PAGE_SIZE < 8192) 1969 } else if (!ICE_2K_TOO_SMALL_WITH_PADDING && 1970 (vsi->netdev->mtu <= ETH_DATA_LEN)) { 1971 vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; 1972 vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; 1973 #endif 1974 } else { 1975 vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; 1976 vsi->rx_buf_len = ICE_RXBUF_3072; 1977 } 1978 } 1979 1980 /** 1981 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register 1982 * @hw: HW pointer 1983 * @pf_q: index of the Rx queue in the PF's queue space 1984 * @rxdid: flexible descriptor RXDID 1985 * @prio: priority for the RXDID for this queue 1986 * @ena_ts: true to enable timestamp and false to disable timestamp 1987 */ 1988 void 1989 ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, 1990 bool ena_ts) 1991 { 1992 u32 regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); 1993 1994 /* clear any previous values */ 1995 regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M | 1996 QRXFLXP_CNTXT_RXDID_PRIO_M | 1997 QRXFLXP_CNTXT_TS_M); 1998 1999 regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & 2000 QRXFLXP_CNTXT_RXDID_IDX_M; 2001 2002 regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) & 2003 QRXFLXP_CNTXT_RXDID_PRIO_M; 2004 2005 if (ena_ts) 2006 /* Enable TimeSync on this queue */ 2007 regval |= QRXFLXP_CNTXT_TS_M; 2008 2009 wr32(hw, QRXFLXP_CNTXT(pf_q), regval); 2010 } 2011 2012 int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) 2013 { 2014 if (q_idx >= vsi->num_rxq) 2015 return -EINVAL; 2016 2017 return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]); 2018 } 2019 2020 int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx) 2021 { 2022 struct ice_aqc_add_tx_qgrp *qg_buf; 2023 int err; 2024 2025 if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx]) 2026 return -EINVAL; 2027 2028 qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL); 2029 if (!qg_buf) 2030 return -ENOMEM; 2031 2032 qg_buf->num_txqs = 1; 2033 2034 err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf); 2035 kfree(qg_buf); 2036 return err; 2037 } 2038 2039 /** 2040 * ice_vsi_cfg_rxqs - Configure the VSI for Rx 2041 * @vsi: the VSI being configured 2042 * 2043 * Return 0 on success and a negative value on error 2044 * Configure the Rx VSI for operation. 2045 */ 2046 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) 2047 { 2048 u16 i; 2049 2050 if (vsi->type == ICE_VSI_VF) 2051 goto setup_rings; 2052 2053 ice_vsi_cfg_frame_size(vsi); 2054 setup_rings: 2055 /* set up individual rings */ 2056 ice_for_each_rxq(vsi, i) { 2057 int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]); 2058 2059 if (err) 2060 return err; 2061 } 2062 2063 return 0; 2064 } 2065 2066 /** 2067 * ice_vsi_cfg_txqs - Configure the VSI for Tx 2068 * @vsi: the VSI being configured 2069 * @rings: Tx ring array to be configured 2070 * @count: number of Tx ring array elements 2071 * 2072 * Return 0 on success and a negative value on error 2073 * Configure the Tx VSI for operation.
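 *
 * A minimal usage sketch (illustrative only; both callers appear just
 * below): the LAN path passes the stack rings and their count, e.g.
 *
 *	err = ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
 *	if (err)
 *		return err;
 *
 * while the XDP path passes vsi->xdp_rings and vsi->num_xdp_txq instead.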
2074 */ 2075 static int 2076 ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count) 2077 { 2078 struct ice_aqc_add_tx_qgrp *qg_buf; 2079 u16 q_idx = 0; 2080 int err = 0; 2081 2082 qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL); 2083 if (!qg_buf) 2084 return -ENOMEM; 2085 2086 qg_buf->num_txqs = 1; 2087 2088 for (q_idx = 0; q_idx < count; q_idx++) { 2089 err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf); 2090 if (err) 2091 goto err_cfg_txqs; 2092 } 2093 2094 err_cfg_txqs: 2095 kfree(qg_buf); 2096 return err; 2097 } 2098 2099 /** 2100 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx 2101 * @vsi: the VSI being configured 2102 * 2103 * Return 0 on success and a negative value on error 2104 * Configure the Tx VSI for operation. 2105 */ 2106 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi) 2107 { 2108 return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq); 2109 } 2110 2111 /** 2112 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI 2113 * @vsi: the VSI being configured 2114 * 2115 * Return 0 on success and a negative value on error 2116 * Configure the Tx queues dedicated for XDP in given VSI for operation. 2117 */ 2118 int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi) 2119 { 2120 int ret; 2121 int i; 2122 2123 ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq); 2124 if (ret) 2125 return ret; 2126 2127 ice_for_each_rxq(vsi, i) 2128 ice_tx_xsk_pool(vsi, i); 2129 2130 return 0; 2131 } 2132 2133 /** 2134 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value 2135 * @intrl: interrupt rate limit in usecs 2136 * @gran: interrupt rate limit granularity in usecs 2137 * 2138 * This function converts a decimal interrupt rate limit in usecs to the format 2139 * expected by firmware. 
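 *
 * For example, assuming a granularity of 4 usecs (the value behind
 * ICE_INTRL_GRAN_ABOVE_25, which ice_write_intrl() below passes in),
 * intrl = 20 gives val = 20 / 4 = 5, which is returned with
 * GLINT_RATE_INTRL_ENA_M set; intrl = 0 returns 0, leaving rate
 * limiting disabled.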
2140 */ 2141 static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) 2142 { 2143 u32 val = intrl / gran; 2144 2145 if (val) 2146 return val | GLINT_RATE_INTRL_ENA_M; 2147 return 0; 2148 } 2149 2150 /** 2151 * ice_write_intrl - write throttle rate limit to interrupt specific register 2152 * @q_vector: pointer to interrupt specific structure 2153 * @intrl: throttle rate limit in microseconds to write 2154 */ 2155 void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl) 2156 { 2157 struct ice_hw *hw = &q_vector->vsi->back->hw; 2158 2159 wr32(hw, GLINT_RATE(q_vector->reg_idx), 2160 ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25)); 2161 } 2162 2163 static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc) 2164 { 2165 switch (rc->type) { 2166 case ICE_RX_CONTAINER: 2167 if (rc->rx_ring) 2168 return rc->rx_ring->q_vector; 2169 break; 2170 case ICE_TX_CONTAINER: 2171 if (rc->tx_ring) 2172 return rc->tx_ring->q_vector; 2173 break; 2174 default: 2175 break; 2176 } 2177 2178 return NULL; 2179 } 2180 2181 /** 2182 * __ice_write_itr - write throttle rate to register 2183 * @q_vector: pointer to interrupt data structure 2184 * @rc: pointer to ring container 2185 * @itr: throttle rate in microseconds to write 2186 */ 2187 static void __ice_write_itr(struct ice_q_vector *q_vector, 2188 struct ice_ring_container *rc, u16 itr) 2189 { 2190 struct ice_hw *hw = &q_vector->vsi->back->hw; 2191 2192 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), 2193 ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S); 2194 } 2195 2196 /** 2197 * ice_write_itr - write throttle rate to queue specific register 2198 * @rc: pointer to ring container 2199 * @itr: throttle rate in microseconds to write 2200 */ 2201 void ice_write_itr(struct ice_ring_container *rc, u16 itr) 2202 { 2203 struct ice_q_vector *q_vector; 2204 2205 q_vector = ice_pull_qvec_from_rc(rc); 2206 if (!q_vector) 2207 return; 2208 2209 __ice_write_itr(q_vector, rc, itr); 2210 } 2211 2212 /** 2213 * ice_set_q_vector_intrl - set up interrupt rate limiting 2214 * @q_vector: the vector to be configured 2215 * 2216 * Interrupt rate limiting is local to the vector, not per-queue so we must 2217 * detect if either ring container has dynamic moderation enabled to decide 2218 * what to set the interrupt rate limit to via INTRL settings. In the case that 2219 * dynamic moderation is disabled on both, write the value with the cached 2220 * setting to make sure INTRL register matches the user visible value. 2221 */ 2222 void ice_set_q_vector_intrl(struct ice_q_vector *q_vector) 2223 { 2224 if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) { 2225 /* in the case of dynamic enabled, cap each vector to no more 2226 * than (4 us) 250,000 ints/sec, which allows low latency 2227 * but still less than 500,000 interrupts per second, which 2228 * reduces CPU a bit in the case of the lowest latency 2229 * setting. The 4 here is a value in microseconds. 2230 */ 2231 ice_write_intrl(q_vector, 4); 2232 } else { 2233 ice_write_intrl(q_vector, q_vector->intrl); 2234 } 2235 } 2236 2237 /** 2238 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW 2239 * @vsi: the VSI being configured 2240 * 2241 * This configures MSIX mode interrupts for the PF VSI, and should not be used 2242 * for the VF VSI. 
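 *
 * Sketch of the resulting programming (illustrative): for a VSI with two
 * q_vectors, each owning one Tx and one Rx queue, the same reg_idx is
 * written for a vector's Tx queue (QINT_TQCTL) and Rx queue (QINT_RQCTL)
 * together with that vector's Tx/Rx ITR indices, so queue pair 0 raises
 * vector 0 and queue pair 1 raises vector 1.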
2243 */ 2244 void ice_vsi_cfg_msix(struct ice_vsi *vsi) 2245 { 2246 struct ice_pf *pf = vsi->back; 2247 struct ice_hw *hw = &pf->hw; 2248 u16 txq = 0, rxq = 0; 2249 int i, q; 2250 2251 ice_for_each_q_vector(vsi, i) { 2252 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2253 u16 reg_idx = q_vector->reg_idx; 2254 2255 ice_cfg_itr(hw, q_vector); 2256 2257 /* Both Transmit Queue Interrupt Cause Control register 2258 * and Receive Queue Interrupt Cause control register 2259 * expects MSIX_INDX field to be the vector index 2260 * within the function space and not the absolute 2261 * vector index across PF or across device. 2262 * For SR-IOV VF VSIs queue vector index always starts 2263 * with 1 since first vector index(0) is used for OICR 2264 * in VF space. Since VMDq and other PF VSIs are within 2265 * the PF function space, use the vector index that is 2266 * tracked for this PF. 2267 */ 2268 for (q = 0; q < q_vector->num_ring_tx; q++) { 2269 ice_cfg_txq_interrupt(vsi, txq, reg_idx, 2270 q_vector->tx.itr_idx); 2271 txq++; 2272 } 2273 2274 for (q = 0; q < q_vector->num_ring_rx; q++) { 2275 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx, 2276 q_vector->rx.itr_idx); 2277 rxq++; 2278 } 2279 } 2280 } 2281 2282 /** 2283 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings 2284 * @vsi: the VSI whose rings are to be enabled 2285 * 2286 * Returns 0 on success and a negative value on error 2287 */ 2288 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi) 2289 { 2290 return ice_vsi_ctrl_all_rx_rings(vsi, true); 2291 } 2292 2293 /** 2294 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings 2295 * @vsi: the VSI whose rings are to be disabled 2296 * 2297 * Returns 0 on success and a negative value on error 2298 */ 2299 int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi) 2300 { 2301 return ice_vsi_ctrl_all_rx_rings(vsi, false); 2302 } 2303 2304 /** 2305 * ice_vsi_stop_tx_rings - Disable Tx rings 2306 * @vsi: the VSI being configured 2307 * @rst_src: reset source 2308 * @rel_vmvf_num: Relative ID of VF/VM 2309 * @rings: Tx ring array to be stopped 2310 * @count: number of Tx ring array elements 2311 */ 2312 static int 2313 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2314 u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count) 2315 { 2316 u16 q_idx; 2317 2318 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) 2319 return -EINVAL; 2320 2321 for (q_idx = 0; q_idx < count; q_idx++) { 2322 struct ice_txq_meta txq_meta = { }; 2323 int status; 2324 2325 if (!rings || !rings[q_idx]) 2326 return -EINVAL; 2327 2328 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); 2329 status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num, 2330 rings[q_idx], &txq_meta); 2331 2332 if (status) 2333 return status; 2334 } 2335 2336 return 0; 2337 } 2338 2339 /** 2340 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings 2341 * @vsi: the VSI being configured 2342 * @rst_src: reset source 2343 * @rel_vmvf_num: Relative ID of VF/VM 2344 */ 2345 int 2346 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 2347 u16 rel_vmvf_num) 2348 { 2349 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq); 2350 } 2351 2352 /** 2353 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings 2354 * @vsi: the VSI being configured 2355 */ 2356 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi) 2357 { 2358 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq); 2359 } 2360 2361 /** 2362 * ice_vsi_is_rx_queue_active 2363 * @vsi: the VSI 
being configured 2364 * 2365 * Return true if at least one queue is active. 2366 */ 2367 bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi) 2368 { 2369 struct ice_pf *pf = vsi->back; 2370 struct ice_hw *hw = &pf->hw; 2371 int i; 2372 2373 ice_for_each_rxq(vsi, i) { 2374 u32 rx_reg; 2375 int pf_q; 2376 2377 pf_q = vsi->rxq_map[i]; 2378 rx_reg = rd32(hw, QRX_CTRL(pf_q)); 2379 if (rx_reg & QRX_CTRL_QENA_STAT_M) 2380 return true; 2381 } 2382 2383 return false; 2384 } 2385 2386 /** 2387 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not 2388 * @vsi: VSI to check whether or not VLAN pruning is enabled. 2389 * 2390 * returns true if Rx VLAN pruning is enabled and false otherwise. 2391 */ 2392 bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi) 2393 { 2394 if (!vsi) 2395 return false; 2396 2397 return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA); 2398 } 2399 2400 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) 2401 { 2402 if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { 2403 vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; 2404 vsi->tc_cfg.numtc = 1; 2405 return; 2406 } 2407 2408 /* set VSI TC information based on DCB config */ 2409 ice_vsi_set_dcb_tc_cfg(vsi); 2410 } 2411 2412 /** 2413 * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors 2414 * @vsi: VSI to set the q_vectors register index on 2415 */ 2416 static int 2417 ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi) 2418 { 2419 u16 i; 2420 2421 if (!vsi || !vsi->q_vectors) 2422 return -EINVAL; 2423 2424 ice_for_each_q_vector(vsi, i) { 2425 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2426 2427 if (!q_vector) { 2428 dev_err(ice_pf_to_dev(vsi->back), "Failed to set reg_idx on q_vector %d VSI %d\n", 2429 i, vsi->vsi_num); 2430 goto clear_reg_idx; 2431 } 2432 2433 if (vsi->type == ICE_VSI_VF) { 2434 struct ice_vf *vf = vsi->vf; 2435 2436 q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector); 2437 } else { 2438 q_vector->reg_idx = 2439 q_vector->v_idx + vsi->base_vector; 2440 } 2441 } 2442 2443 return 0; 2444 2445 clear_reg_idx: 2446 ice_for_each_q_vector(vsi, i) { 2447 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2448 2449 if (q_vector) 2450 q_vector->reg_idx = 0; 2451 } 2452 2453 return -EINVAL; 2454 } 2455 2456 /** 2457 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling 2458 * @vsi: the VSI being configured 2459 * @tx: bool to determine Tx or Rx rule 2460 * @create: bool to determine create or remove Rule 2461 */ 2462 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) 2463 { 2464 int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, 2465 enum ice_sw_fwd_act_type act); 2466 struct ice_pf *pf = vsi->back; 2467 struct device *dev; 2468 int status; 2469 2470 dev = ice_pf_to_dev(pf); 2471 eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth; 2472 2473 if (tx) { 2474 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX, 2475 ICE_DROP_PACKET); 2476 } else { 2477 if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) { 2478 status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num, 2479 create); 2480 } else { 2481 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, 2482 ICE_FWD_TO_VSI); 2483 } 2484 } 2485 2486 if (status) 2487 dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n", 2488 create ? "adding" : "removing", tx ? 
"TX" : "RX", 2489 vsi->vsi_num, status); 2490 } 2491 2492 /** 2493 * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it 2494 * @vsi: pointer to the VSI 2495 * 2496 * This function will allocate new scheduler aggregator now if needed and will 2497 * move specified VSI into it. 2498 */ 2499 static void ice_set_agg_vsi(struct ice_vsi *vsi) 2500 { 2501 struct device *dev = ice_pf_to_dev(vsi->back); 2502 struct ice_agg_node *agg_node_iter = NULL; 2503 u32 agg_id = ICE_INVALID_AGG_NODE_ID; 2504 struct ice_agg_node *agg_node = NULL; 2505 int node_offset, max_agg_nodes = 0; 2506 struct ice_port_info *port_info; 2507 struct ice_pf *pf = vsi->back; 2508 u32 agg_node_id_start = 0; 2509 int status; 2510 2511 /* create (as needed) scheduler aggregator node and move VSI into 2512 * corresponding aggregator node 2513 * - PF aggregator node to contains VSIs of type _PF and _CTRL 2514 * - VF aggregator nodes will contain VF VSI 2515 */ 2516 port_info = pf->hw.port_info; 2517 if (!port_info) 2518 return; 2519 2520 switch (vsi->type) { 2521 case ICE_VSI_CTRL: 2522 case ICE_VSI_CHNL: 2523 case ICE_VSI_LB: 2524 case ICE_VSI_PF: 2525 case ICE_VSI_SWITCHDEV_CTRL: 2526 max_agg_nodes = ICE_MAX_PF_AGG_NODES; 2527 agg_node_id_start = ICE_PF_AGG_NODE_ID_START; 2528 agg_node_iter = &pf->pf_agg_node[0]; 2529 break; 2530 case ICE_VSI_VF: 2531 /* user can create 'n' VFs on a given PF, but since max children 2532 * per aggregator node can be only 64. Following code handles 2533 * aggregator(s) for VF VSIs, either selects a agg_node which 2534 * was already created provided num_vsis < 64, otherwise 2535 * select next available node, which will be created 2536 */ 2537 max_agg_nodes = ICE_MAX_VF_AGG_NODES; 2538 agg_node_id_start = ICE_VF_AGG_NODE_ID_START; 2539 agg_node_iter = &pf->vf_agg_node[0]; 2540 break; 2541 default: 2542 /* other VSI type, handle later if needed */ 2543 dev_dbg(dev, "unexpected VSI type %s\n", 2544 ice_vsi_type_str(vsi->type)); 2545 return; 2546 } 2547 2548 /* find the appropriate aggregator node */ 2549 for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) { 2550 /* see if we can find space in previously created 2551 * node if num_vsis < 64, otherwise skip 2552 */ 2553 if (agg_node_iter->num_vsis && 2554 agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { 2555 agg_node_iter++; 2556 continue; 2557 } 2558 2559 if (agg_node_iter->valid && 2560 agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) { 2561 agg_id = agg_node_iter->agg_id; 2562 agg_node = agg_node_iter; 2563 break; 2564 } 2565 2566 /* find unclaimed agg_id */ 2567 if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) { 2568 agg_id = node_offset + agg_node_id_start; 2569 agg_node = agg_node_iter; 2570 break; 2571 } 2572 /* move to next agg_node */ 2573 agg_node_iter++; 2574 } 2575 2576 if (!agg_node) 2577 return; 2578 2579 /* if selected aggregator node was not created, create it */ 2580 if (!agg_node->valid) { 2581 status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG, 2582 (u8)vsi->tc_cfg.ena_tc); 2583 if (status) { 2584 dev_err(dev, "unable to create aggregator node with agg_id %u\n", 2585 agg_id); 2586 return; 2587 } 2588 /* aggregator node is created, store the needed info */ 2589 agg_node->valid = true; 2590 agg_node->agg_id = agg_id; 2591 } 2592 2593 /* move VSI to corresponding aggregator node */ 2594 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx, 2595 (u8)vsi->tc_cfg.ena_tc); 2596 if (status) { 2597 dev_err(dev, "unable to move VSI idx %u into aggregator %u node", 2598 vsi->idx, agg_id); 
2599 return; 2600 } 2601 2602 /* keep active children count for aggregator node */ 2603 agg_node->num_vsis++; 2604 2605 /* cache the aggregator node pointer in the VSI, so that after a reset 2606 * the VSI will be moved back into its aggregator node 2607 */ 2608 vsi->agg_node = agg_node; 2609 dev_dbg(dev, "successfully moved VSI idx %u (tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n", 2610 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id, 2611 vsi->agg_node->num_vsis); 2612 } 2613 2614 /** 2615 * ice_free_vf_ctrl_res - Free the VF control VSI resource 2616 * @pf: pointer to PF structure 2617 * @vsi: the VSI to free resources for 2618 * 2619 * Check if the VF control VSI resource is still in use. If no VF is using it 2620 * any more, release the VSI resource. Otherwise, leave it to be cleaned up 2621 * once no other VF uses it. 2622 */ 2623 static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi) 2624 { 2625 struct ice_vf *vf; 2626 unsigned int bkt; 2627 2628 rcu_read_lock(); 2629 ice_for_each_vf_rcu(pf, bkt, vf) { 2630 if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { 2631 rcu_read_unlock(); 2632 return; 2633 } 2634 } 2635 rcu_read_unlock(); 2636 2637 /* No other VFs left that have control VSI. It is now safe to reclaim 2638 * SW interrupts back to the common pool. 2639 */ 2640 ice_free_res(pf->irq_tracker, vsi->base_vector, 2641 ICE_RES_VF_CTRL_VEC_ID); 2642 pf->num_avail_sw_msix += vsi->num_q_vectors; 2643 } 2644 2645 static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi) 2646 { 2647 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2648 struct device *dev = ice_pf_to_dev(pf); 2649 int ret, i; 2650 2651 /* configure VSI nodes based on number of queues and TC's */ 2652 ice_for_each_traffic_class(i) { 2653 if (!(vsi->tc_cfg.ena_tc & BIT(i))) 2654 continue; 2655 2656 if (vsi->type == ICE_VSI_CHNL) { 2657 if (!vsi->alloc_txq && vsi->num_txq) 2658 max_txqs[i] = vsi->num_txq; 2659 else 2660 max_txqs[i] = pf->num_lan_tx; 2661 } else { 2662 max_txqs[i] = vsi->alloc_txq; 2663 } 2664 } 2665 2666 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); 2667 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2668 max_txqs); 2669 if (ret) { 2670 dev_err(dev, "VSI %d failed lan queue config, error %d\n", 2671 vsi->vsi_num, ret); 2672 return ret; 2673 } 2674 2675 return 0; 2676 } 2677 2678 /** 2679 * ice_vsi_cfg_def - configure default VSI based on the type 2680 * @vsi: pointer to VSI 2681 * @params: the parameters to configure this VSI with 2682 */ 2683 static int 2684 ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) 2685 { 2686 struct device *dev = ice_pf_to_dev(vsi->back); 2687 struct ice_pf *pf = vsi->back; 2688 int ret; 2689 2690 vsi->vsw = pf->first_sw; 2691 2692 ret = ice_vsi_alloc_def(vsi, params->ch); 2693 if (ret) 2694 return ret; 2695 2696 /* allocate memory for Tx/Rx ring stat pointers */ 2697 ret = ice_vsi_alloc_stat_arrays(vsi); 2698 if (ret) 2699 goto unroll_vsi_alloc; 2700 2701 ice_alloc_fd_res(vsi); 2702 2703 ret = ice_vsi_get_qs(vsi); 2704 if (ret) { 2705 dev_err(dev, "Failed to allocate queues.
vsi->idx = %d\n", 2706 vsi->idx); 2707 goto unroll_vsi_alloc_stat; 2708 } 2709 2710 /* set RSS capabilities */ 2711 ice_vsi_set_rss_params(vsi); 2712 2713 /* set TC configuration */ 2714 ice_vsi_set_tc_cfg(vsi); 2715 2716 /* create the VSI */ 2717 ret = ice_vsi_init(vsi, params->flags); 2718 if (ret) 2719 goto unroll_get_qs; 2720 2721 ice_vsi_init_vlan_ops(vsi); 2722 2723 switch (vsi->type) { 2724 case ICE_VSI_CTRL: 2725 case ICE_VSI_SWITCHDEV_CTRL: 2726 case ICE_VSI_PF: 2727 ret = ice_vsi_alloc_q_vectors(vsi); 2728 if (ret) 2729 goto unroll_vsi_init; 2730 2731 ret = ice_vsi_setup_vector_base(vsi); 2732 if (ret) 2733 goto unroll_alloc_q_vector; 2734 2735 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 2736 if (ret) 2737 goto unroll_vector_base; 2738 2739 ret = ice_vsi_alloc_rings(vsi); 2740 if (ret) 2741 goto unroll_vector_base; 2742 2743 ret = ice_vsi_alloc_ring_stats(vsi); 2744 if (ret) 2745 goto unroll_vector_base; 2746 2747 ice_vsi_map_rings_to_vectors(vsi); 2748 if (ice_is_xdp_ena_vsi(vsi)) { 2749 ret = ice_vsi_determine_xdp_res(vsi); 2750 if (ret) 2751 goto unroll_vector_base; 2752 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); 2753 if (ret) 2754 goto unroll_vector_base; 2755 } 2756 2757 /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ 2758 if (vsi->type != ICE_VSI_CTRL) 2759 /* Do not exit if configuring RSS had an issue, at 2760 * least receive traffic on first queue. Hence no 2761 * need to capture return value 2762 */ 2763 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2764 ice_vsi_cfg_rss_lut_key(vsi); 2765 ice_vsi_set_rss_flow_fld(vsi); 2766 } 2767 ice_init_arfs(vsi); 2768 break; 2769 case ICE_VSI_CHNL: 2770 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2771 ice_vsi_cfg_rss_lut_key(vsi); 2772 ice_vsi_set_rss_flow_fld(vsi); 2773 } 2774 break; 2775 case ICE_VSI_VF: 2776 /* VF driver will take care of creating netdev for this type and 2777 * map queues to vectors through Virtchnl, PF driver only 2778 * creates a VSI and corresponding structures for bookkeeping 2779 * purpose 2780 */ 2781 ret = ice_vsi_alloc_q_vectors(vsi); 2782 if (ret) 2783 goto unroll_vsi_init; 2784 2785 ret = ice_vsi_alloc_rings(vsi); 2786 if (ret) 2787 goto unroll_alloc_q_vector; 2788 2789 ret = ice_vsi_set_q_vectors_reg_idx(vsi); 2790 if (ret) 2791 goto unroll_vector_base; 2792 2793 ret = ice_vsi_alloc_ring_stats(vsi); 2794 if (ret) 2795 goto unroll_vector_base; 2796 /* Do not exit if configuring RSS had an issue, at least 2797 * receive traffic on first queue. 
Hence no need to capture 2798 * return value 2799 */ 2800 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2801 ice_vsi_cfg_rss_lut_key(vsi); 2802 ice_vsi_set_vf_rss_flow_fld(vsi); 2803 } 2804 break; 2805 case ICE_VSI_LB: 2806 ret = ice_vsi_alloc_rings(vsi); 2807 if (ret) 2808 goto unroll_vsi_init; 2809 2810 ret = ice_vsi_alloc_ring_stats(vsi); 2811 if (ret) 2812 goto unroll_vector_base; 2813 2814 break; 2815 default: 2816 /* clean up the resources and exit */ 2817 ret = -EINVAL; 2818 goto unroll_vsi_init; 2819 } 2820 2821 return 0; 2822 2823 unroll_vector_base: 2824 /* reclaim SW interrupts back to the common pool */ 2825 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); 2826 pf->num_avail_sw_msix += vsi->num_q_vectors; 2827 unroll_alloc_q_vector: 2828 ice_vsi_free_q_vectors(vsi); 2829 unroll_vsi_init: 2830 ice_vsi_delete_from_hw(vsi); 2831 unroll_get_qs: 2832 ice_vsi_put_qs(vsi); 2833 unroll_vsi_alloc_stat: 2834 ice_vsi_free_stats(vsi); 2835 unroll_vsi_alloc: 2836 ice_vsi_free_arrays(vsi); 2837 return ret; 2838 } 2839 2840 /** 2841 * ice_vsi_cfg - configure a previously allocated VSI 2842 * @vsi: pointer to VSI 2843 * @params: parameters used to configure this VSI 2844 */ 2845 int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) 2846 { 2847 struct ice_pf *pf = vsi->back; 2848 int ret; 2849 2850 if (WARN_ON(params->type == ICE_VSI_VF && !params->vf)) 2851 return -EINVAL; 2852 2853 vsi->type = params->type; 2854 vsi->port_info = params->pi; 2855 2856 /* For VSIs which don't have a connected VF, this will be NULL */ 2857 vsi->vf = params->vf; 2858 2859 ret = ice_vsi_cfg_def(vsi, params); 2860 if (ret) 2861 return ret; 2862 2863 ret = ice_vsi_cfg_tc_lan(vsi->back, vsi); 2864 if (ret) 2865 ice_vsi_decfg(vsi); 2866 2867 if (vsi->type == ICE_VSI_CTRL) { 2868 if (vsi->vf) { 2869 WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI); 2870 vsi->vf->ctrl_vsi_idx = vsi->idx; 2871 } else { 2872 WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI); 2873 pf->ctrl_vsi_idx = vsi->idx; 2874 } 2875 } 2876 2877 return ret; 2878 } 2879 2880 /** 2881 * ice_vsi_decfg - remove all VSI configuration 2882 * @vsi: pointer to VSI 2883 */ 2884 void ice_vsi_decfg(struct ice_vsi *vsi) 2885 { 2886 struct ice_pf *pf = vsi->back; 2887 int err; 2888 2889 /* The Rx rule will only exist to remove if the LLDP FW 2890 * engine is currently stopped 2891 */ 2892 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && 2893 !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) 2894 ice_cfg_sw_lldp(vsi, false, false); 2895 2896 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); 2897 err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); 2898 if (err) 2899 dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", 2900 vsi->vsi_num, err); 2901 2902 if (ice_is_xdp_ena_vsi(vsi)) 2903 /* return value check can be skipped here, it always returns 2904 * 0 if reset is in progress 2905 */ 2906 ice_destroy_xdp_rings(vsi); 2907 2908 ice_vsi_clear_rings(vsi); 2909 ice_vsi_free_q_vectors(vsi); 2910 ice_vsi_put_qs(vsi); 2911 ice_vsi_free_arrays(vsi); 2912 2913 /* SR-IOV determines needed MSIX resources all at once instead of per 2914 * VSI since when VFs are spawned we know how many VFs there are and how 2915 * many interrupts each VF needs. SR-IOV MSIX resources are also 2916 * cleared in the same manner. 
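 * Other, non-VF VSI types hand their vectors straight back to the
 * shared irq_tracker pool below.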
2917 */ 2918 if (vsi->type == ICE_VSI_CTRL && vsi->vf) { 2919 ice_free_vf_ctrl_res(pf, vsi); 2920 } else if (vsi->type != ICE_VSI_VF) { 2921 /* reclaim SW interrupts back to the common pool */ 2922 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); 2923 pf->num_avail_sw_msix += vsi->num_q_vectors; 2924 vsi->base_vector = 0; 2925 } 2926 2927 if (vsi->type == ICE_VSI_VF && 2928 vsi->agg_node && vsi->agg_node->valid) 2929 vsi->agg_node->num_vsis--; 2930 if (vsi->agg_node) { 2931 vsi->agg_node->valid = false; 2932 vsi->agg_node->agg_id = 0; 2933 } 2934 } 2935 2936 /** 2937 * ice_vsi_setup - Set up a VSI by a given type 2938 * @pf: board private structure 2939 * @params: parameters to use when creating the VSI 2940 * 2941 * This allocates the sw VSI structure and its queue resources. 2942 * 2943 * Returns pointer to the successfully allocated and configured VSI sw struct on 2944 * success, NULL on failure. 2945 */ 2946 struct ice_vsi * 2947 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) 2948 { 2949 struct device *dev = ice_pf_to_dev(pf); 2950 struct ice_vsi *vsi; 2951 int ret; 2952 2953 /* ice_vsi_setup can only initialize a new VSI, and we must have 2954 * a port_info structure for it. 2955 */ 2956 if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) || 2957 WARN_ON(!params->pi)) 2958 return NULL; 2959 2960 vsi = ice_vsi_alloc(pf); 2961 if (!vsi) { 2962 dev_err(dev, "could not allocate VSI\n"); 2963 return NULL; 2964 } 2965 2966 ret = ice_vsi_cfg(vsi, params); 2967 if (ret) 2968 goto err_vsi_cfg; 2969 2970 /* Add switch rule to drop all Tx Flow Control Frames, of look up 2971 * type ETHERTYPE from VSIs, and restrict malicious VF from sending 2972 * out PAUSE or PFC frames. If enabled, FW can still send FC frames. 2973 * The rule is added once for PF VSI in order to create appropriate 2974 * recipe, since VSI/VSI list is ignored with drop action... 2975 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to 2976 * be dropped so that VFs cannot send LLDP packets to reconfig DCB 2977 * settings in the HW. 
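 * Both rules are installed below only for the PF VSI and only when the
 * PF is not running in safe mode.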
2978 */ 2979 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { 2980 ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, 2981 ICE_DROP_PACKET); 2982 ice_cfg_sw_lldp(vsi, true, true); 2983 } 2984 2985 if (!vsi->agg_node) 2986 ice_set_agg_vsi(vsi); 2987 2988 return vsi; 2989 2990 err_vsi_cfg: 2991 if (params->type == ICE_VSI_VF) 2992 ice_enable_lag(pf->lag); 2993 ice_vsi_free(vsi); 2994 2995 return NULL; 2996 } 2997 2998 /** 2999 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW 3000 * @vsi: the VSI being cleaned up 3001 */ 3002 static void ice_vsi_release_msix(struct ice_vsi *vsi) 3003 { 3004 struct ice_pf *pf = vsi->back; 3005 struct ice_hw *hw = &pf->hw; 3006 u32 txq = 0; 3007 u32 rxq = 0; 3008 int i, q; 3009 3010 ice_for_each_q_vector(vsi, i) { 3011 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 3012 3013 ice_write_intrl(q_vector, 0); 3014 for (q = 0; q < q_vector->num_ring_tx; q++) { 3015 ice_write_itr(&q_vector->tx, 0); 3016 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); 3017 if (ice_is_xdp_ena_vsi(vsi)) { 3018 u32 xdp_txq = txq + vsi->num_xdp_txq; 3019 3020 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); 3021 } 3022 txq++; 3023 } 3024 3025 for (q = 0; q < q_vector->num_ring_rx; q++) { 3026 ice_write_itr(&q_vector->rx, 0); 3027 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); 3028 rxq++; 3029 } 3030 } 3031 3032 ice_flush(hw); 3033 } 3034 3035 /** 3036 * ice_vsi_free_irq - Free the IRQ association with the OS 3037 * @vsi: the VSI being configured 3038 */ 3039 void ice_vsi_free_irq(struct ice_vsi *vsi) 3040 { 3041 struct ice_pf *pf = vsi->back; 3042 int base = vsi->base_vector; 3043 int i; 3044 3045 if (!vsi->q_vectors || !vsi->irqs_ready) 3046 return; 3047 3048 ice_vsi_release_msix(vsi); 3049 if (vsi->type == ICE_VSI_VF) 3050 return; 3051 3052 vsi->irqs_ready = false; 3053 ice_free_cpu_rx_rmap(vsi); 3054 3055 ice_for_each_q_vector(vsi, i) { 3056 u16 vector = i + base; 3057 int irq_num; 3058 3059 irq_num = pf->msix_entries[vector].vector; 3060 3061 /* free only the irqs that were actually requested */ 3062 if (!vsi->q_vectors[i] || 3063 !(vsi->q_vectors[i]->num_ring_tx || 3064 vsi->q_vectors[i]->num_ring_rx)) 3065 continue; 3066 3067 /* clear the affinity notifier in the IRQ descriptor */ 3068 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) 3069 irq_set_affinity_notifier(irq_num, NULL); 3070 3071 /* clear the affinity_mask in the IRQ descriptor */ 3072 irq_set_affinity_hint(irq_num, NULL); 3073 synchronize_irq(irq_num); 3074 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); 3075 } 3076 } 3077 3078 /** 3079 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues 3080 * @vsi: the VSI having resources freed 3081 */ 3082 void ice_vsi_free_tx_rings(struct ice_vsi *vsi) 3083 { 3084 int i; 3085 3086 if (!vsi->tx_rings) 3087 return; 3088 3089 ice_for_each_txq(vsi, i) 3090 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 3091 ice_free_tx_ring(vsi->tx_rings[i]); 3092 } 3093 3094 /** 3095 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues 3096 * @vsi: the VSI having resources freed 3097 */ 3098 void ice_vsi_free_rx_rings(struct ice_vsi *vsi) 3099 { 3100 int i; 3101 3102 if (!vsi->rx_rings) 3103 return; 3104 3105 ice_for_each_rxq(vsi, i) 3106 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) 3107 ice_free_rx_ring(vsi->rx_rings[i]); 3108 } 3109 3110 /** 3111 * ice_vsi_close - Shut down a VSI 3112 * @vsi: the VSI being shut down 3113 */ 3114 void ice_vsi_close(struct ice_vsi *vsi) 3115 { 3116 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) 3117 ice_down(vsi); 3118 3119 
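/* release IRQs before freeing the Tx/Rx rings they service so that no
 * interrupt handler can touch descriptors that are being torn down
 */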
ice_vsi_free_irq(vsi); 3120 ice_vsi_free_tx_rings(vsi); 3121 ice_vsi_free_rx_rings(vsi); 3122 } 3123 3124 /** 3125 * ice_ena_vsi - resume a VSI 3126 * @vsi: the VSI being resume 3127 * @locked: is the rtnl_lock already held 3128 */ 3129 int ice_ena_vsi(struct ice_vsi *vsi, bool locked) 3130 { 3131 int err = 0; 3132 3133 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state)) 3134 return 0; 3135 3136 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 3137 3138 if (vsi->netdev && vsi->type == ICE_VSI_PF) { 3139 if (netif_running(vsi->netdev)) { 3140 if (!locked) 3141 rtnl_lock(); 3142 3143 err = ice_open_internal(vsi->netdev); 3144 3145 if (!locked) 3146 rtnl_unlock(); 3147 } 3148 } else if (vsi->type == ICE_VSI_CTRL) { 3149 err = ice_vsi_open_ctrl(vsi); 3150 } 3151 3152 return err; 3153 } 3154 3155 /** 3156 * ice_dis_vsi - pause a VSI 3157 * @vsi: the VSI being paused 3158 * @locked: is the rtnl_lock already held 3159 */ 3160 void ice_dis_vsi(struct ice_vsi *vsi, bool locked) 3161 { 3162 if (test_bit(ICE_VSI_DOWN, vsi->state)) 3163 return; 3164 3165 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 3166 3167 if (vsi->type == ICE_VSI_PF && vsi->netdev) { 3168 if (netif_running(vsi->netdev)) { 3169 if (!locked) 3170 rtnl_lock(); 3171 3172 ice_vsi_close(vsi); 3173 3174 if (!locked) 3175 rtnl_unlock(); 3176 } else { 3177 ice_vsi_close(vsi); 3178 } 3179 } else if (vsi->type == ICE_VSI_CTRL || 3180 vsi->type == ICE_VSI_SWITCHDEV_CTRL) { 3181 ice_vsi_close(vsi); 3182 } 3183 } 3184 3185 /** 3186 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI 3187 * @vsi: the VSI being un-configured 3188 */ 3189 void ice_vsi_dis_irq(struct ice_vsi *vsi) 3190 { 3191 int base = vsi->base_vector; 3192 struct ice_pf *pf = vsi->back; 3193 struct ice_hw *hw = &pf->hw; 3194 u32 val; 3195 int i; 3196 3197 /* disable interrupt causation from each queue */ 3198 if (vsi->tx_rings) { 3199 ice_for_each_txq(vsi, i) { 3200 if (vsi->tx_rings[i]) { 3201 u16 reg; 3202 3203 reg = vsi->tx_rings[i]->reg_idx; 3204 val = rd32(hw, QINT_TQCTL(reg)); 3205 val &= ~QINT_TQCTL_CAUSE_ENA_M; 3206 wr32(hw, QINT_TQCTL(reg), val); 3207 } 3208 } 3209 } 3210 3211 if (vsi->rx_rings) { 3212 ice_for_each_rxq(vsi, i) { 3213 if (vsi->rx_rings[i]) { 3214 u16 reg; 3215 3216 reg = vsi->rx_rings[i]->reg_idx; 3217 val = rd32(hw, QINT_RQCTL(reg)); 3218 val &= ~QINT_RQCTL_CAUSE_ENA_M; 3219 wr32(hw, QINT_RQCTL(reg), val); 3220 } 3221 } 3222 } 3223 3224 /* disable each interrupt */ 3225 ice_for_each_q_vector(vsi, i) { 3226 if (!vsi->q_vectors[i]) 3227 continue; 3228 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); 3229 } 3230 3231 ice_flush(hw); 3232 3233 /* don't call synchronize_irq() for VF's from the host */ 3234 if (vsi->type == ICE_VSI_VF) 3235 return; 3236 3237 ice_for_each_q_vector(vsi, i) 3238 synchronize_irq(pf->msix_entries[i + base].vector); 3239 } 3240 3241 /** 3242 * ice_napi_del - Remove NAPI handler for the VSI 3243 * @vsi: VSI for which NAPI handler is to be removed 3244 */ 3245 void ice_napi_del(struct ice_vsi *vsi) 3246 { 3247 int v_idx; 3248 3249 if (!vsi->netdev) 3250 return; 3251 3252 ice_for_each_q_vector(vsi, v_idx) 3253 netif_napi_del(&vsi->q_vectors[v_idx]->napi); 3254 } 3255 3256 /** 3257 * ice_vsi_release - Delete a VSI and free its resources 3258 * @vsi: the VSI being removed 3259 * 3260 * Returns 0 on success or < 0 on error 3261 */ 3262 int ice_vsi_release(struct ice_vsi *vsi) 3263 { 3264 struct ice_pf *pf; 3265 3266 if (!vsi->back) 3267 return -ENODEV; 3268 pf = vsi->back; 3269 3270 /* do not unregister while driver is in 
the reset recovery pending 3271 * state. Since reset/rebuild happens through PF service task workqueue, 3272 * it's not a good idea to unregister netdev that is associated to the 3273 * PF that is running the work queue items currently. This is done to 3274 * avoid check_flush_dependency() warning on this wq 3275 */ 3276 if (vsi->netdev && !ice_is_reset_in_progress(pf->state) && 3277 (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state))) { 3278 unregister_netdev(vsi->netdev); 3279 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 3280 } 3281 3282 if (vsi->type == ICE_VSI_PF) 3283 ice_devlink_destroy_pf_port(pf); 3284 3285 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 3286 ice_rss_clean(vsi); 3287 3288 ice_vsi_close(vsi); 3289 ice_vsi_decfg(vsi); 3290 3291 if (vsi->netdev) { 3292 if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) { 3293 unregister_netdev(vsi->netdev); 3294 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 3295 } 3296 if (test_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state)) { 3297 free_netdev(vsi->netdev); 3298 vsi->netdev = NULL; 3299 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 3300 } 3301 } 3302 3303 /* retain SW VSI data structure since it is needed to unregister and 3304 * free VSI netdev when PF is not in reset recovery pending state,\ 3305 * for ex: during rmmod. 3306 */ 3307 if (!ice_is_reset_in_progress(pf->state)) 3308 ice_vsi_delete(vsi); 3309 3310 return 0; 3311 } 3312 3313 /** 3314 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors 3315 * @vsi: VSI connected with q_vectors 3316 * @coalesce: array of struct with stored coalesce 3317 * 3318 * Returns array size. 3319 */ 3320 static int 3321 ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi, 3322 struct ice_coalesce_stored *coalesce) 3323 { 3324 int i; 3325 3326 ice_for_each_q_vector(vsi, i) { 3327 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 3328 3329 coalesce[i].itr_tx = q_vector->tx.itr_settings; 3330 coalesce[i].itr_rx = q_vector->rx.itr_settings; 3331 coalesce[i].intrl = q_vector->intrl; 3332 3333 if (i < vsi->num_txq) 3334 coalesce[i].tx_valid = true; 3335 if (i < vsi->num_rxq) 3336 coalesce[i].rx_valid = true; 3337 } 3338 3339 return vsi->num_q_vectors; 3340 } 3341 3342 /** 3343 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays 3344 * @vsi: VSI connected with q_vectors 3345 * @coalesce: pointer to array of struct with stored coalesce 3346 * @size: size of coalesce array 3347 * 3348 * Before this function, ice_vsi_rebuild_get_coalesce should be called to save 3349 * ITR params in arrays. If size is 0 or coalesce wasn't stored set coalesce 3350 * to default value. 3351 */ 3352 static void 3353 ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, 3354 struct ice_coalesce_stored *coalesce, int size) 3355 { 3356 struct ice_ring_container *rc; 3357 int i; 3358 3359 if ((size && !coalesce) || !vsi) 3360 return; 3361 3362 /* There are a couple of cases that have to be handled here: 3363 * 1. The case where the number of queue vectors stays the same, but 3364 * the number of Tx or Rx rings changes (the first for loop) 3365 * 2. 
The case where the number of queue vectors increased (the 3366 * second for loop) 3367 */ 3368 for (i = 0; i < size && i < vsi->num_q_vectors; i++) { 3369 /* There are 2 cases to handle here and they are the same for 3370 * both Tx and Rx: 3371 * if the entry was valid previously (coalesce[i].[tr]x_valid) 3372 * and the loop variable is less than the number of rings 3373 * allocated, then write the previous values 3374 * 3375 * if the entry was not valid previously, but the loop variable 3376 * is still within the number of rings allocated (meaning the 3377 * ring count increased from before), then write out the 3378 * values from the first element 3379 * 3380 * Also, always write the ITR, even when ITR_IS_DYNAMIC, 3381 * as there is no harm: the dynamic algorithm 3382 * will simply overwrite it. 3383 */ 3384 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) { 3385 rc = &vsi->q_vectors[i]->rx; 3386 rc->itr_settings = coalesce[i].itr_rx; 3387 ice_write_itr(rc, rc->itr_setting); 3388 } else if (i < vsi->alloc_rxq) { 3389 rc = &vsi->q_vectors[i]->rx; 3390 rc->itr_settings = coalesce[0].itr_rx; 3391 ice_write_itr(rc, rc->itr_setting); 3392 } 3393 3394 if (i < vsi->alloc_txq && coalesce[i].tx_valid) { 3395 rc = &vsi->q_vectors[i]->tx; 3396 rc->itr_settings = coalesce[i].itr_tx; 3397 ice_write_itr(rc, rc->itr_setting); 3398 } else if (i < vsi->alloc_txq) { 3399 rc = &vsi->q_vectors[i]->tx; 3400 rc->itr_settings = coalesce[0].itr_tx; 3401 ice_write_itr(rc, rc->itr_setting); 3402 } 3403 3404 vsi->q_vectors[i]->intrl = coalesce[i].intrl; 3405 ice_set_q_vector_intrl(vsi->q_vectors[i]); 3406 } 3407 3408 /* the number of queue vectors increased so write whatever is in 3409 * the first element 3410 */ 3411 for (; i < vsi->num_q_vectors; i++) { 3412 /* transmit */ 3413 rc = &vsi->q_vectors[i]->tx; 3414 rc->itr_settings = coalesce[0].itr_tx; 3415 ice_write_itr(rc, rc->itr_setting); 3416 3417 /* receive */ 3418 rc = &vsi->q_vectors[i]->rx; 3419 rc->itr_settings = coalesce[0].itr_rx; 3420 ice_write_itr(rc, rc->itr_setting); 3421 3422 vsi->q_vectors[i]->intrl = coalesce[0].intrl; 3423 ice_set_q_vector_intrl(vsi->q_vectors[i]); 3424 } 3425 } 3426 3427 /** 3428 * ice_vsi_realloc_stat_arrays - Frees unused stat structures 3429 * @vsi: VSI pointer 3430 * @prev_txq: Number of Tx rings before ring reallocation 3431 * @prev_rxq: Number of Rx rings before ring reallocation 3432 */ 3433 static void 3434 ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq) 3435 { 3436 struct ice_vsi_stats *vsi_stat; 3437 struct ice_pf *pf = vsi->back; 3438 int i; 3439 3440 if (!prev_txq || !prev_rxq) 3441 return; 3442 if (vsi->type == ICE_VSI_CHNL) 3443 return; 3444 3445 vsi_stat = pf->vsi_stats[vsi->idx]; 3446 3447 if (vsi->num_txq < prev_txq) { 3448 for (i = vsi->num_txq; i < prev_txq; i++) { 3449 if (vsi_stat->tx_ring_stats[i]) { 3450 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); 3451 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); 3452 } 3453 } 3454 } 3455 3456 if (vsi->num_rxq < prev_rxq) { 3457 for (i = vsi->num_rxq; i < prev_rxq; i++) { 3458 if (vsi_stat->rx_ring_stats[i]) { 3459 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); 3460 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); 3461 } 3462 } 3463 } 3464 } 3465 3466 /** 3467 * ice_vsi_rebuild - Rebuild VSI after reset 3468 * @vsi: VSI to be rebuilt 3469 * @vsi_flags: flags used for VSI rebuild flow 3470 * 3471 * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or 3472 * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.
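 *
 * A typical post-reset caller looks roughly like this (sketch only):
 *
 *	err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
 *	if (err)
 *		dev_err(ice_pf_to_dev(pf), "VSI rebuild failed: %d\n", err);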
3473 * 3474 * Returns 0 on success and negative value on failure 3475 */ 3476 int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) 3477 { 3478 struct ice_vsi_cfg_params params = {}; 3479 struct ice_coalesce_stored *coalesce; 3480 int ret, prev_txq, prev_rxq; 3481 int prev_num_q_vectors = 0; 3482 struct ice_pf *pf; 3483 3484 if (!vsi) 3485 return -EINVAL; 3486 3487 params = ice_vsi_to_params(vsi); 3488 params.flags = vsi_flags; 3489 3490 pf = vsi->back; 3491 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) 3492 return -EINVAL; 3493 3494 coalesce = kcalloc(vsi->num_q_vectors, 3495 sizeof(struct ice_coalesce_stored), GFP_KERNEL); 3496 if (!coalesce) 3497 return -ENOMEM; 3498 3499 prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); 3500 3501 prev_txq = vsi->num_txq; 3502 prev_rxq = vsi->num_rxq; 3503 3504 ice_vsi_decfg(vsi); 3505 ret = ice_vsi_cfg_def(vsi, &params); 3506 if (ret) 3507 goto err_vsi_cfg; 3508 3509 ret = ice_vsi_cfg_tc_lan(pf, vsi); 3510 if (ret) { 3511 if (vsi_flags & ICE_VSI_FLAG_INIT) { 3512 ret = -EIO; 3513 goto err_vsi_cfg_tc_lan; 3514 } 3515 3516 kfree(coalesce); 3517 return ice_schedule_reset(pf, ICE_RESET_PFR); 3518 } 3519 3520 ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq); 3521 3522 ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors); 3523 kfree(coalesce); 3524 3525 return 0; 3526 3527 err_vsi_cfg_tc_lan: 3528 ice_vsi_decfg(vsi); 3529 err_vsi_cfg: 3530 kfree(coalesce); 3531 return ret; 3532 } 3533 3534 /** 3535 * ice_is_reset_in_progress - check for a reset in progress 3536 * @state: PF state field 3537 */ 3538 bool ice_is_reset_in_progress(unsigned long *state) 3539 { 3540 return test_bit(ICE_RESET_OICR_RECV, state) || 3541 test_bit(ICE_PFR_REQ, state) || 3542 test_bit(ICE_CORER_REQ, state) || 3543 test_bit(ICE_GLOBR_REQ, state); 3544 } 3545 3546 /** 3547 * ice_wait_for_reset - Wait for driver to finish reset and rebuild 3548 * @pf: pointer to the PF structure 3549 * @timeout: length of time to wait, in jiffies 3550 * 3551 * Wait (sleep) for a short time until the driver finishes cleaning up from 3552 * a device reset. The caller must be able to sleep. Use this to delay 3553 * operations that could fail while the driver is cleaning up after a device 3554 * reset. 3555 * 3556 * Returns 0 on success, -EBUSY if the reset is not finished within the 3557 * timeout, and -ERESTARTSYS if the thread was interrupted.
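 *
 * Sketch of a caller that gives a pending rebuild ten seconds to finish:
 *
 *	err = ice_wait_for_reset(pf, 10 * HZ);
 *	if (err)
 *		return err;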
3558 */ 3559 int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout) 3560 { 3561 long ret; 3562 3563 ret = wait_event_interruptible_timeout(pf->reset_wait_queue, 3564 !ice_is_reset_in_progress(pf->state), 3565 timeout); 3566 if (ret < 0) 3567 return ret; 3568 else if (!ret) 3569 return -EBUSY; 3570 else 3571 return 0; 3572 } 3573 3574 /** 3575 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map 3576 * @vsi: VSI being configured 3577 * @ctx: the context buffer returned from AQ VSI update command 3578 */ 3579 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) 3580 { 3581 vsi->info.mapping_flags = ctx->info.mapping_flags; 3582 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, 3583 sizeof(vsi->info.q_mapping)); 3584 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, 3585 sizeof(vsi->info.tc_mapping)); 3586 } 3587 3588 /** 3589 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration 3590 * @vsi: the VSI being configured 3591 * @ena_tc: TC map to be enabled 3592 */ 3593 void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) 3594 { 3595 struct net_device *netdev = vsi->netdev; 3596 struct ice_pf *pf = vsi->back; 3597 int numtc = vsi->tc_cfg.numtc; 3598 struct ice_dcbx_cfg *dcbcfg; 3599 u8 netdev_tc; 3600 int i; 3601 3602 if (!netdev) 3603 return; 3604 3605 /* CHNL VSI doesn't have its own netdev, hence no netdev_tc */ 3606 if (vsi->type == ICE_VSI_CHNL) 3607 return; 3608 3609 if (!ena_tc) { 3610 netdev_reset_tc(netdev); 3611 return; 3612 } 3613 3614 if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf)) 3615 numtc = vsi->all_numtc; 3616 3617 if (netdev_set_num_tc(netdev, numtc)) 3618 return; 3619 3620 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; 3621 3622 ice_for_each_traffic_class(i) 3623 if (vsi->tc_cfg.ena_tc & BIT(i)) 3624 netdev_set_tc_queue(netdev, 3625 vsi->tc_cfg.tc_info[i].netdev_tc, 3626 vsi->tc_cfg.tc_info[i].qcount_tx, 3627 vsi->tc_cfg.tc_info[i].qoffset); 3628 /* setup TC queue map for CHNL TCs */ 3629 ice_for_each_chnl_tc(i) { 3630 if (!(vsi->all_enatc & BIT(i))) 3631 break; 3632 if (!vsi->mqprio_qopt.qopt.count[i]) 3633 break; 3634 netdev_set_tc_queue(netdev, i, 3635 vsi->mqprio_qopt.qopt.count[i], 3636 vsi->mqprio_qopt.qopt.offset[i]); 3637 } 3638 3639 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3640 return; 3641 3642 for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { 3643 u8 ets_tc = dcbcfg->etscfg.prio_table[i]; 3644 3645 /* Get the mapped netdev TC# for the UP */ 3646 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; 3647 netdev_set_prio_tc_map(netdev, i, netdev_tc); 3648 } 3649 } 3650 3651 /** 3652 * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config 3653 * @vsi: the VSI being configured 3654 * @ctxt: VSI context structure 3655 * @ena_tc: TC map of the traffic classes to be enabled 3656 * 3657 * Prepares VSI tc_config to have queue configurations based on MQPRIO options. 3658 */ 3659 static int 3660 ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, 3661 u8 ena_tc) 3662 { 3663 u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap; 3664 u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0]; 3665 int tc0_qcount = vsi->mqprio_qopt.qopt.count[0]; 3666 u16 new_txq, new_rxq; 3667 u8 netdev_tc = 0; 3668 int i; 3669 3670 vsi->tc_cfg.ena_tc = ena_tc ?
ena_tc : 1; 3671 3672 pow = order_base_2(tc0_qcount); 3673 qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & 3674 ICE_AQ_VSI_TC_Q_OFFSET_M) | 3675 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M); 3676 3677 ice_for_each_traffic_class(i) { 3678 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { 3679 /* TC is not enabled */ 3680 vsi->tc_cfg.tc_info[i].qoffset = 0; 3681 vsi->tc_cfg.tc_info[i].qcount_rx = 1; 3682 vsi->tc_cfg.tc_info[i].qcount_tx = 1; 3683 vsi->tc_cfg.tc_info[i].netdev_tc = 0; 3684 ctxt->info.tc_mapping[i] = 0; 3685 continue; 3686 } 3687 3688 offset = vsi->mqprio_qopt.qopt.offset[i]; 3689 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; 3690 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; 3691 vsi->tc_cfg.tc_info[i].qoffset = offset; 3692 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; 3693 vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx; 3694 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; 3695 } 3696 3697 if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) { 3698 ice_for_each_chnl_tc(i) { 3699 if (!(vsi->all_enatc & BIT(i))) 3700 continue; 3701 offset = vsi->mqprio_qopt.qopt.offset[i]; 3702 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; 3703 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; 3704 } 3705 } 3706 3707 new_txq = offset + qcount_tx; 3708 if (new_txq > vsi->alloc_txq) { 3709 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", 3710 new_txq, vsi->alloc_txq); 3711 return -EINVAL; 3712 } 3713 3714 new_rxq = offset + qcount_rx; 3715 if (new_rxq > vsi->alloc_rxq) { 3716 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", 3717 new_rxq, vsi->alloc_rxq); 3718 return -EINVAL; 3719 } 3720 3721 /* Set actual Tx/Rx queue pairs */ 3722 vsi->num_txq = new_txq; 3723 vsi->num_rxq = new_rxq; 3724 3725 /* Setup queue TC[0].qmap for given VSI context */ 3726 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); 3727 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); 3728 ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount); 3729 3730 /* Find queue count available for channel VSIs and starting offset 3731 * for channel VSIs 3732 */ 3733 if (tc0_qcount && tc0_qcount < vsi->num_rxq) { 3734 vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount; 3735 vsi->next_base_q = tc0_qcount; 3736 } 3737 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq); 3738 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq); 3739 dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n", 3740 vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc); 3741 3742 return 0; 3743 } 3744 3745 /** 3746 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map 3747 * @vsi: VSI to be configured 3748 * @ena_tc: TC bitmap 3749 * 3750 * VSI queues expected to be quiesced before calling this function 3751 */ 3752 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) 3753 { 3754 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 3755 struct ice_pf *pf = vsi->back; 3756 struct ice_tc_cfg old_tc_cfg; 3757 struct ice_vsi_ctx *ctx; 3758 struct device *dev; 3759 int i, ret = 0; 3760 u8 num_tc = 0; 3761 3762 dev = ice_pf_to_dev(pf); 3763 if (vsi->tc_cfg.ena_tc == ena_tc && 3764 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) 3765 return 0; 3766 3767 ice_for_each_traffic_class(i) { 3768 /* build bitmap of enabled TCs */ 3769 if (ena_tc & BIT(i)) 3770 num_tc++; 3771 /* populate max_txqs per TC */ 3772 max_txqs[i] = vsi->alloc_txq; 3773 /* Update max_txqs if it is CHNL VSI, because alloc_t[r]xq are 3774 * zero for CHNL VSI, 
hence use num_txq instead as max_txqs 3775 */ 3776 if (vsi->type == ICE_VSI_CHNL && 3777 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3778 max_txqs[i] = vsi->num_txq; 3779 } 3780 3781 memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg)); 3782 vsi->tc_cfg.ena_tc = ena_tc; 3783 vsi->tc_cfg.numtc = num_tc; 3784 3785 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 3786 if (!ctx) 3787 return -ENOMEM; 3788 3789 ctx->vf_num = 0; 3790 ctx->info = vsi->info; 3791 3792 if (vsi->type == ICE_VSI_PF && 3793 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3794 ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); 3795 else 3796 ret = ice_vsi_setup_q_map(vsi, ctx); 3797 3798 if (ret) { 3799 memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg)); 3800 goto out; 3801 } 3802 3803 /* must indicate which sections of the VSI context are being modified */ 3804 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); 3805 ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); 3806 if (ret) { 3807 dev_info(dev, "Failed VSI Update\n"); 3808 goto out; 3809 } 3810 3811 if (vsi->type == ICE_VSI_PF && 3812 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 3813 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); 3814 else 3815 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 3816 vsi->tc_cfg.ena_tc, max_txqs); 3817 3818 if (ret) { 3819 dev_err(dev, "VSI %d failed TC config, error %d\n", 3820 vsi->vsi_num, ret); 3821 goto out; 3822 } 3823 ice_vsi_update_q_map(vsi, ctx); 3824 vsi->info.valid_sections = 0; 3825 3826 ice_vsi_cfg_netdev_tc(vsi, ena_tc); 3827 out: 3828 kfree(ctx); 3829 return ret; 3830 } 3831 3832 /** 3833 * ice_update_ring_stats - Update ring statistics 3834 * @stats: stats to be updated 3835 * @pkts: number of processed packets 3836 * @bytes: number of processed bytes 3837 * 3838 * This function assumes that the caller has acquired a u64_stats_sync lock.
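 *
 * The expected calling pattern is the one used by the ring wrappers just
 * below, e.g. for a Tx ring:
 *
 *	u64_stats_update_begin(&tx_ring->ring_stats->syncp);
 *	ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
 *	u64_stats_update_end(&tx_ring->ring_stats->syncp);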
3839 */ 3840 static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes) 3841 { 3842 stats->bytes += bytes; 3843 stats->pkts += pkts; 3844 } 3845 3846 /** 3847 * ice_update_tx_ring_stats - Update Tx ring specific counters 3848 * @tx_ring: ring to update 3849 * @pkts: number of processed packets 3850 * @bytes: number of processed bytes 3851 */ 3852 void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes) 3853 { 3854 u64_stats_update_begin(&tx_ring->ring_stats->syncp); 3855 ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes); 3856 u64_stats_update_end(&tx_ring->ring_stats->syncp); 3857 } 3858 3859 /** 3860 * ice_update_rx_ring_stats - Update Rx ring specific counters 3861 * @rx_ring: ring to update 3862 * @pkts: number of processed packets 3863 * @bytes: number of processed bytes 3864 */ 3865 void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes) 3866 { 3867 u64_stats_update_begin(&rx_ring->ring_stats->syncp); 3868 ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes); 3869 u64_stats_update_end(&rx_ring->ring_stats->syncp); 3870 } 3871 3872 /** 3873 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used 3874 * @pi: port info of the switch with default VSI 3875 * 3876 * Return true if there is a single VSI in the default forwarding VSI list 3877 */ 3878 bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi) 3879 { 3880 bool exists = false; 3881 3882 ice_check_if_dflt_vsi(pi, 0, &exists); 3883 return exists; 3884 } 3885 3886 /** 3887 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI 3888 * @vsi: VSI to compare against default forwarding VSI 3889 * 3890 * If the VSI passed in is the default forwarding VSI then return true, else 3891 * return false 3892 */ 3893 bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi) 3894 { 3895 return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL); 3896 } 3897 3898 /** 3899 * ice_set_dflt_vsi - set the default forwarding VSI 3900 * @vsi: VSI getting set as the default forwarding VSI on the switch 3901 * 3902 * If the VSI passed in is already the default VSI and it's enabled just return 3903 * success. 3904 * 3905 * Otherwise try to set the VSI passed in as the switch's default VSI and 3906 * return the result. 3907 */ 3908 int ice_set_dflt_vsi(struct ice_vsi *vsi) 3909 { 3910 struct device *dev; 3911 int status; 3912 3913 if (!vsi) 3914 return -EINVAL; 3915 3916 dev = ice_pf_to_dev(vsi->back); 3917 3918 /* the VSI passed in is already the default VSI */ 3919 if (ice_is_vsi_dflt_vsi(vsi)) { 3920 dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n", 3921 vsi->vsi_num); 3922 return 0; 3923 } 3924 3925 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX); 3926 if (status) { 3927 dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n", 3928 vsi->vsi_num, status); 3929 return status; 3930 } 3931 3932 return 0; 3933 } 3934 3935 /** 3936 * ice_clear_dflt_vsi - clear the default forwarding VSI 3937 * @vsi: VSI to remove from filter list 3938 * 3939 * If the switch has no default VSI or it's not enabled then return error. 3940 * 3941 * Otherwise try to clear the default VSI and return the result.
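 *
 * A caller tearing down promiscuous mode would typically pair this with
 * the setter above, e.g. (sketch):
 *
 *	if (ice_is_vsi_dflt_vsi(vsi))
 *		err = ice_clear_dflt_vsi(vsi);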
/**
 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
 * @pi: port info of the switch with default VSI
 *
 * Return true if there is a single VSI in the default forwarding VSI list
 */
bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
{
	bool exists = false;

	ice_check_if_dflt_vsi(pi, 0, &exists);
	return exists;
}

/**
 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
 * @vsi: VSI to compare against default forwarding VSI
 *
 * Return true if the VSI passed in is the default forwarding VSI, else
 * return false.
 */
bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
{
	return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
}

/**
 * ice_set_dflt_vsi - set the default forwarding VSI
 * @vsi: VSI getting set as the default forwarding VSI on the switch
 *
 * If the VSI passed in is already the default VSI, just return success.
 *
 * Otherwise try to set the VSI passed in as the switch's default VSI and
 * return the result.
 */
int ice_set_dflt_vsi(struct ice_vsi *vsi)
{
	struct device *dev;
	int status;

	if (!vsi)
		return -EINVAL;

	dev = ice_pf_to_dev(vsi->back);

	/* the VSI passed in is already the default VSI */
	if (ice_is_vsi_dflt_vsi(vsi)) {
		dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
			vsi->vsi_num);
		return 0;
	}

	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
	if (status) {
		dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
			vsi->vsi_num, status);
		return status;
	}

	return 0;
}

/**
 * ice_clear_dflt_vsi - clear the default forwarding VSI
 * @vsi: VSI to remove from filter list
 *
 * If the switch has no default VSI configured, return an error.
 *
 * Otherwise try to clear the default VSI and return the result.
 */
int ice_clear_dflt_vsi(struct ice_vsi *vsi)
{
	struct device *dev;
	int status;

	if (!vsi)
		return -EINVAL;

	dev = ice_pf_to_dev(vsi->back);

	/* there is no default VSI configured */
	if (!ice_is_dflt_vsi_in_use(vsi->port_info))
		return -ENODEV;

	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
				  ICE_FLTR_RX);
	if (status) {
		dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
			vsi->vsi_num, status);
		return -EIO;
	}

	return 0;
}

/**
 * ice_get_link_speed_mbps - get link speed in Mbps
 * @vsi: the VSI whose link speed is being queried
 *
 * Return the current VSI link speed in Mbps, or 0 if the speed is unknown.
 */
int ice_get_link_speed_mbps(struct ice_vsi *vsi)
{
	unsigned int link_speed;

	link_speed = vsi->port_info->phy.link_info.link_speed;

	return (int)ice_get_link_speed(fls(link_speed) - 1);
}

/**
 * ice_get_link_speed_kbps - get link speed in Kbps
 * @vsi: the VSI whose link speed is being queried
 *
 * Return the current VSI link speed in Kbps, or 0 if the speed is unknown.
 */
int ice_get_link_speed_kbps(struct ice_vsi *vsi)
{
	int speed_mbps;

	speed_mbps = ice_get_link_speed_mbps(vsi);

	return speed_mbps * 1000;
}
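/*
 * Worked example for the fls() conversion above: link_info.link_speed is a
 * one-hot ICE_AQ_LINK_SPEED_* bitmask. If, say, only bit 5 is set, then
 * fls(link_speed) returns 6, so ice_get_link_speed(5) is called and that
 * index is mapped to a speed in Mbps. With link down, link_speed is 0,
 * fls() returns 0, and the resulting out-of-range index makes
 * ice_get_link_speed() report 0 (unknown).
 */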
/**
 * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
 * @vsi: VSI to be configured
 * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
 *
 * A min_tx_rate of 0 clears the minimum BW limit profile; a non-zero value
 * enforces a minimum BW limit for the VSI on TC 0.
 */
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;
	int speed;

	dev = ice_pf_to_dev(pf);
	if (!vsi->port_info) {
		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
			vsi->idx, vsi->type);
		return -EINVAL;
	}

	speed = ice_get_link_speed_kbps(vsi);
	if (min_tx_rate > (u64)speed) {
		dev_err(dev, "min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
			min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
			speed);
		return -EINVAL;
	}

	/* Configure min BW for VSI limit */
	if (min_tx_rate) {
		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
						   ICE_MIN_BW, min_tx_rate);
		if (status) {
			dev_err(dev, "failed to set min Tx rate (%llu Kbps) for %s %d\n",
				min_tx_rate, ice_vsi_type_str(vsi->type),
				vsi->idx);
			return status;
		}

		dev_dbg(dev, "set min Tx rate (%llu Kbps) for %s %d\n",
			min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
	} else {
		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
							vsi->idx, 0,
							ICE_MIN_BW);
		if (status) {
			dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
				ice_vsi_type_str(vsi->type), vsi->idx);
			return status;
		}

		dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
			ice_vsi_type_str(vsi->type), vsi->idx);
	}

	return 0;
}
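/*
 * Illustrative sketch (not driver code): a hypothetical caller enforcing a
 * minimum rate. Rates that arrive in Mbps (as in the ndo_set_vf_rate API)
 * must be converted to Kbps before calling the helper:
 *
 *	int err;
 *
 *	err = ice_set_min_bw_limit(vsi, (u64)min_rate_mbps * 1000);
 *	if (err)
 *		return err;
 */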
/**
 * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
 * @vsi: VSI to be configured
 * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
 *
 * A max_tx_rate of 0 clears the maximum BW limit profile; a non-zero value
 * enforces a maximum BW limit for the VSI on TC 0.
 */
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;
	int speed;

	dev = ice_pf_to_dev(pf);
	if (!vsi->port_info) {
		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
			vsi->idx, vsi->type);
		return -EINVAL;
	}

	speed = ice_get_link_speed_kbps(vsi);
	if (max_tx_rate > (u64)speed) {
		dev_err(dev, "max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
			speed);
		return -EINVAL;
	}

	/* Configure max BW for VSI limit */
	if (max_tx_rate) {
		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
						   ICE_MAX_BW, max_tx_rate);
		if (status) {
			dev_err(dev, "failed to set max Tx rate (%llu Kbps) for %s %d\n",
				max_tx_rate, ice_vsi_type_str(vsi->type),
				vsi->idx);
			return status;
		}

		dev_dbg(dev, "set max Tx rate (%llu Kbps) for %s %d\n",
			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
	} else {
		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
							vsi->idx, 0,
							ICE_MAX_BW);
		if (status) {
			dev_err(dev, "failed to clear max Tx rate configuration for %s %d\n",
				ice_vsi_type_str(vsi->type), vsi->idx);
			return status;
		}

		dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
			ice_vsi_type_str(vsi->type), vsi->idx);
	}

	return 0;
}

/**
 * ice_set_link - turn on/off physical link
 * @vsi: VSI to modify physical link on
 * @ena: turn on/off physical link
 */
int ice_set_link(struct ice_vsi *vsi, bool ena)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_port_info *pi = vsi->port_info;
	struct ice_hw *hw = pi->hw;
	int status;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	status = ice_aq_set_link_restart_an(pi, ena, NULL);

	/* If link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
	 * This is not a fatal error, so print a warning message and return
	 * a success code. Return an error if FW returns an error code other
	 * than ICE_AQ_RC_EMODE.
	 */
	if (status == -EIO) {
		if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
			dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
				(ena ? "ON" : "OFF"), status,
				ice_aq_str(hw->adminq.sq_last_status));
	} else if (status) {
		dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
			(ena ? "ON" : "OFF"), status,
			ice_aq_str(hw->adminq.sq_last_status));
		return status;
	}

	return 0;
}
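/*
 * Illustrative sketch (not driver code): a hypothetical caller bouncing the
 * physical link, relying on ice_set_link() treating manageability-owned
 * links (ICE_AQ_RC_EMODE) as non-fatal:
 *
 *	err = ice_set_link(vsi, false);
 *	if (err)
 *		return err;
 *	return ice_set_link(vsi, true);
 */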
/**
 * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
 * @vsi: VSI used to add VLAN filters
 *
 * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are
 * based on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8)
 * doesn't matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
 * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
 *
 * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged
 * traffic when VLAN pruning is enabled. Also, this handles VLAN 0 priority
 * tagged traffic in SVM, since the VLAN TPID isn't part of filtering.
 *
 * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
 * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID
 * is part of filtering.
 */
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct ice_vlan vlan;
	int err;

	vlan = ICE_VLAN(0, 0, 0);
	err = vlan_ops->add_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* in SVM both VLAN 0 filters are identical */
	if (!ice_is_dvm_ena(&vsi->back->hw))
		return 0;

	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
	err = vlan_ops->add_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	return 0;
}

/**
 * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
 * @vsi: VSI used to delete VLAN filters
 *
 * Delete the VLAN 0 filters in the same manner that they were added in
 * ice_vsi_add_vlan_zero.
 */
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct ice_vlan vlan;
	int err;

	vlan = ICE_VLAN(0, 0, 0);
	err = vlan_ops->del_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* in SVM both VLAN 0 filters are identical */
	if (!ice_is_dvm_ena(&vsi->back->hw))
		return 0;

	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
	err = vlan_ops->del_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* when deleting the last VLAN filter, make sure to disable the VLAN
	 * promisc mode so the filter isn't left by accident
	 */
	return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
				     ICE_MCAST_VLAN_PROMISC_BITS, 0);
}
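/*
 * Illustrative summary (not driver code) of what ice_vsi_add_vlan_zero()
 * leaves installed. In SVM a single filter covers untagged and priority
 * tagged traffic; DVM needs both because the TPID is part of the lookup:
 *
 *	vlan = ICE_VLAN(0, 0, 0);		   SVM and DVM
 *	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);	   DVM only
 */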
/**
 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
 * @vsi: VSI used to get the VLAN mode
 *
 * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
 * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details.
 */
static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
{
#define ICE_DVM_NUM_ZERO_VLAN_FLTRS	2
#define ICE_SVM_NUM_ZERO_VLAN_FLTRS	1
	/* no VLAN 0 filter is created when a port VLAN is active */
	if (vsi->type == ICE_VSI_VF) {
		if (WARN_ON(!vsi->vf))
			return 0;

		if (ice_vf_is_port_vlan_ena(vsi->vf))
			return 0;
	}

	if (ice_is_dvm_ena(&vsi->back->hw))
		return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
	else
		return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
}

/**
 * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
 * @vsi: VSI used to determine if any non-zero VLANs have been added
 */
bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
{
	return vsi->num_vlan > ice_vsi_num_zero_vlans(vsi);
}

/**
 * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
 * @vsi: VSI used to get the number of non-zero VLANs added
 */
u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
{
	return vsi->num_vlan - ice_vsi_num_zero_vlans(vsi);
}

/**
 * ice_is_feature_supported - check if a feature is supported
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to be checked
 *
 * Return true if the feature is supported, false otherwise.
 */
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return false;

	return test_bit(f, pf->features);
}

/**
 * ice_set_feature_support - mark a feature as supported
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to set
 */
static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return;

	set_bit(f, pf->features);
}

/**
 * ice_clear_feature_support - mark a feature as unsupported
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to clear
 */
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return;

	clear_bit(f, pf->features);
}

/**
 * ice_init_feature_support - set the features supported by this device
 * @pf: pointer to the struct ice_pf instance
 *
 * Called during driver init to set up the supported-feature bitmap.
 */
void ice_init_feature_support(struct ice_pf *pf)
{
	switch (pf->hw.device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
		ice_set_feature_support(pf, ICE_F_DSCP);
		ice_set_feature_support(pf, ICE_F_PTP_EXTTS);
		if (ice_is_e810t(&pf->hw)) {
			ice_set_feature_support(pf, ICE_F_SMA_CTRL);
			if (ice_gnss_is_gps_present(&pf->hw))
				ice_set_feature_support(pf, ICE_F_GNSS);
		}
		break;
	default:
		break;
	}
}
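/*
 * Illustrative sketch (not driver code): a hypothetical caller gating
 * optional setup on a feature bit populated by ice_init_feature_support():
 *
 *	if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
 *		register_extts_pins(pf);
 *
 * register_extts_pins() is a made-up placeholder for the feature-specific
 * setup.
 */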
/**
 * ice_vsi_update_security - update security block in VSI
 * @vsi: pointer to VSI structure
 * @fill: function pointer to fill ctx
 */
int
ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
{
	struct ice_vsi_ctx ctx = { 0 };

	ctx.info = vsi->info;
	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	fill(&ctx);

	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
		return -ENODEV;

	vsi->info = ctx.info;
	return 0;
}

/**
 * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
			       (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}

/**
 * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
			       ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}

/**
 * ice_vsi_ctx_set_allow_override - allow destination override on VSI
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
}

/**
 * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
}
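/*
 * Illustrative sketch (not driver code): the fill-callback pattern above
 * lets a caller flip security bits with a single firmware update, e.g.
 * enabling MAC antispoof on a VSI:
 *
 *	int err;
 *
 *	err = ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
 *	if (err)
 *		return err;
 */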