// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022, Intel Corporation. */

#include "ice_vf_lib_private.h"
#include "ice.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_virtchnl_allowlist.h"

/* Public functions which may be accessed by all driver files */

/**
 * ice_get_vf_by_id - Get pointer to VF by ID
 * @pf: the PF private structure
 * @vf_id: the VF ID to locate
 *
 * Locate and return a pointer to the VF structure associated with a given ID.
 * Returns NULL if the ID does not have a valid VF structure associated with
 * it.
 *
 * This function takes a reference to the VF, which must be released by
 * calling ice_put_vf() once the caller is finished accessing the VF structure
 * returned.
 */
struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
{
	struct ice_vf *vf;

	rcu_read_lock();
	hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
		if (vf->vf_id == vf_id) {
			struct ice_vf *found;

			/* A refcount that has already hit zero means the VF
			 * is being torn down; do not hand it out.
			 */
			if (kref_get_unless_zero(&vf->refcnt))
				found = vf;
			else
				found = NULL;

			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * ice_release_vf - Release VF associated with a refcount
 * @ref: the kref decremented to zero
 *
 * Callback function for kref_put to release a VF once its reference count has
 * hit zero.
 */
static void ice_release_vf(struct kref *ref)
{
	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);

	vf->vf_ops->free(vf);
}

/**
 * ice_put_vf - Release a reference to a VF
 * @vf: the VF structure to decrease reference count on
 *
 * Decrease the reference count for a VF, and free the entry if it is no
 * longer in use.
 *
 * This must be called after ice_get_vf_by_id() once the reference to the VF
 * structure is no longer used. Otherwise, the VF structure will never be
 * freed.
 */
void ice_put_vf(struct ice_vf *vf)
{
	kref_put(&vf->refcnt, ice_release_vf);
}

/**
 * ice_has_vfs - Return true if the PF has any associated VFs
 * @pf: the PF private structure
 *
 * Return whether or not the PF has any allocated VFs.
 *
 * Note that this function only guarantees that there are no VFs at the point
 * of calling it. It does not guarantee that no more VFs will be added.
 */
bool ice_has_vfs(struct ice_pf *pf)
{
	/* A simple check that the hash table is not empty does not require
	 * the mutex or rcu_read_lock.
	 */
	return !hash_empty(pf->vfs.table);
}

/**
 * ice_get_num_vfs - Get number of allocated VFs
 * @pf: the PF private structure
 *
 * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
 * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
 * the output of this function.
 */
u16 ice_get_num_vfs(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	u16 num_vfs = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		num_vfs++;
	rcu_read_unlock();

	return num_vfs;
}

/**
 * ice_get_vf_vsi - get VF's VSI based on the stored index
 * @vf: VF used to get VSI
 *
 * Returns NULL if the VF does not currently own a LAN VSI.
 */
struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
{
	if (vf->lan_vsi_idx == ICE_NO_VSI)
		return NULL;

	return vf->pf->vsi[vf->lan_vsi_idx];
}

/**
 * ice_is_vf_disabled
 * @vf: pointer to the VF info
 *
 * If the PF has been disabled, there is no need to reset the VF until the PF
 * is active again. Similarly, if the VF has been disabled, this means
 * something else is resetting the VF, so we shouldn't continue.
 *
 * Returns true if the caller should consider the VF as disabled whether
 * because that single VF is explicitly disabled or because the PF is
 * currently disabled.
 */
bool ice_is_vf_disabled(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	return (test_bit(ICE_VF_DIS, pf->state) ||
		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
}

/**
 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
 * @vf: the VF being reset
 *
 * The max poll time is about ~800ms, which is about the maximum time it takes
 * for a VF to be reset and/or a VF driver to be removed.
 */
static void ice_wait_on_vf_reset(struct ice_vf *vf)
{
	int i;

	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
		/* INIT is set again once the VF has finished resetting */
		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
			break;
		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
	}
}

/**
 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
 * @vf: VF to check if it's ready to be configured/queried
 *
 * The purpose of this function is to make sure the VF is not in reset, not
 * disabled, and initialized so it can be configured and/or queried by a host
 * administrator.
 */
int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
{
	ice_wait_on_vf_reset(vf);

	if (ice_is_vf_disabled(vf))
		return -EINVAL;

	if (ice_check_vf_init(vf))
		return -EBUSY;

	return 0;
}

/**
 * ice_check_vf_ready_for_reset - check if VF is ready to be reset
 * @vf: VF to check if it's ready to be reset
 *
 * The purpose of this function is to ensure that the VF is not in reset,
 * disabled, and is both initialized and active, thus enabling us to safely
 * initialize another reset.
 */
int ice_check_vf_ready_for_reset(struct ice_vf *vf)
{
	int ret;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		ret = -EAGAIN;

	return ret;
}

/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
	 * needs to clear them in the case of VFR/VFLR. If this is done for
	 * PFR, it can mess up VF resets because the VF driver may already
	 * have started cleanup by the time we get here.
	 */
	if (!is_pfr)
		vf->vf_ops->clear_mbx_register(vf);

	vf->vf_ops->trigger_reset_register(vf, is_vflr);
}

/**
 * ice_vf_clear_counters - reset per-VF bookkeeping counters
 * @vf: the VF whose counters are cleared
 *
 * Zeroes the VF's MAC/VLAN counts and MDD event statistics ahead of a VSI
 * rebuild.
 */
static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (vsi)
		vsi->num_vlan = 0;

	vf->num_mac = 0;
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}

/**
 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 * @vf: VF to perform pre VSI rebuild tasks
 *
 * These tasks are items that don't need to be amortized since they are most
 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
	/* Close any IRQ mapping now */
	if (vf->vf_ops->irq_close)
		vf->vf_ops->irq_close(vf);

	ice_vf_clear_counters(vf);
	vf->vf_ops->clear_reset_trigger(vf);
}

/**
 * ice_vf_recreate_vsi - Release and re-create the VF's VSI
 * @vf: VF to recreate the VSI for
 *
 * This is only called when a single VF is being reset (i.e. VFR, VFLR, host
 * VF configuration change, etc)
 *
 * It releases and then re-creates a new VSI.
 */
static int ice_vf_recreate_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int err;

	ice_vf_vsi_release(vf);

	err = vf->vf_ops->create_vsi(vf);
	if (err) {
		dev_err(ice_pf_to_dev(pf),
			"Failed to recreate the VF%u's VSI, error %d\n",
			vf->vf_id, err);
		return err;
	}

	return 0;
}

/**
 * ice_vf_rebuild_vsi - rebuild the VF's VSI
 * @vf: VF to rebuild the VSI for
 *
 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
 * host, PFR, CORER, etc.).
 *
 * It reprograms the VSI configuration back into hardware.
 */
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_pf *pf = vf->pf;

	if (WARN_ON(!vsi))
		return -EINVAL;

	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
			vf->vf_id);
		return -EIO;
	}
	/* vsi->idx will remain the same in this case so don't update
	 * vf->lan_vsi_idx
	 */
	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
	vf->lan_vsi_num = vsi->vsi_num;

	return 0;
}

/**
 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
 * @vf: the VF being reset
 *
 * Perform reset tasks which must occur after the VSI has been re-created or
 * rebuilt during a VF reset.
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
	ice_vf_rebuild_host_cfg(vf);
	ice_vf_set_initialized(vf);

	vf->vf_ops->post_vsi_rebuild(vf);
}

/**
 * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
 * are in unicast promiscuous mode
 * @pf: PF structure for accessing VF(s)
 *
 * Return false if no VF(s) are in unicast promiscuous mode,
 * else return true
 */
bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
{
	bool is_vf_promisc = false;
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		/* found a VF that has promiscuous mode configured */
		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
			is_vf_promisc = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_vf_promisc;
}

/**
 * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
 * @vf: the VF pointer
 * @vsi: the VSI to configure
 * @ucast_m: promiscuous mask to apply to unicast
 * @mcast_m: promiscuous mask to apply to multicast
 *
 * Decide which mask should be used for unicast and multicast filter,
 * based on presence of VLANs
 */
void
ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
			 u8 *ucast_m, u8 *mcast_m)
{
	/* VLAN-aware masks are needed whenever a port VLAN or any non-zero
	 * VLAN is configured on the VSI.
	 */
	if (ice_vf_is_port_vlan_ena(vf) ||
	    ice_vsi_has_non_zero_vlans(vsi)) {
		*mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
		*ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
	} else {
		*mcast_m = ICE_MCAST_PROMISC_BITS;
		*ucast_m = ICE_UCAST_PROMISC_BITS;
	}
}

/**
 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
 * @vf: the VF pointer
 * @vsi: the VSI to configure
 *
 * Clear all promiscuous/allmulticast filters for a VF
 */
static int
ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_pf *pf = vf->pf;
	u8 ucast_m, mcast_m;
	int ret = 0;

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
		/* Without true promiscuous support, promisc is emulated via
		 * the default VSI; otherwise clear the real promisc filters.
		 */
		if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
			if (ice_is_dflt_vsi_in_use(vsi->port_info))
				ret = ice_clear_dflt_vsi(vsi);
		} else {
			ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
		}

		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
		}
	}

	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
		}
	}
	return ret;
}

/**
 * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
 * @vf: the VF to configure
 * @vsi: the VF's VSI
 * @promisc_m: the promiscuous mode to enable
 */
int
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
	struct ice_hw *hw = &vsi->back->hw;
	int status;

	if (ice_vf_is_port_vlan_ena(vf))
		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
						  ice_vf_get_port_vlan_id(vf));
	else if (ice_vsi_has_non_zero_vlans(vsi))
		status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
	else
		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);

	/* -EEXIST means the filter is already present; treat as success */
	if (status && status != -EEXIST) {
		dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
			vf->vf_id, status);
		return status;
	}

	return 0;
}

/**
 * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
 * @vf: the VF to configure
 * @vsi: the VF's VSI
 * @promisc_m: the promiscuous mode to disable
 */
int
ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
	struct ice_hw *hw = &vsi->back->hw;
	int status;

	if (ice_vf_is_port_vlan_ena(vf))
		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						    ice_vf_get_port_vlan_id(vf));
	else if (ice_vsi_has_non_zero_vlans(vsi))
		status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
	else
		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);

	/* -ENOENT means the filter was already gone; treat as success */
	if (status && status != -ENOENT) {
		dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
			vf->vf_id, status);
		return status;
	}

	return 0;
}

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 *
 * Reset all VFs at once, in response to a PF or other device reset.
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait.
 * This is useful during PF routines which need to reset all VFs, as
 * otherwise it must perform these resets in a serialized fashion.
 */
void ice_reset_all_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);

	/* clear all malicious info if the VFs are getting reset */
	ice_for_each_vf(pf, bkt, vf)
		ice_mbx_clear_malvf(&vf->mbx_info);

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
		mutex_unlock(&pf->vfs.table_lock);
		return;
	}

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, bkt, vf)
		ice_trigger_vf_reset(vf, true, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Now that we've triggered all of the VFs, iterate
	 * the table again and wait for each VF to complete.
	 */
	ice_for_each_vf(pf, bkt, vf) {
		if (!vf->vf_ops->poll_reset_status(vf)) {
			/* Display a warning if at least one VF didn't manage
			 * to reset in time, but continue on with the
			 * operation.
			 */
			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
			break;
		}
	}

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		ice_vf_fdir_init(vf);
		/* clean VF control VSI when resetting VFs since it should be
		 * setup only when VF creates its first FDIR rule.
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);

		mutex_unlock(&vf->cfg_lock);
	}

	if (ice_is_eswitch_mode_switchdev(pf))
		if (ice_eswitch_rebuild(pf))
			dev_warn(dev, "eswitch rebuild failed\n");

	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 */
static void ice_notify_vf_reset(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	struct virtchnl_pf_event pfe;

	/* Bail out if VF is in disabled state, neither initialized, nor active
	 * state - otherwise proceed with notifications
	 */
	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
			      NULL);
}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @flags: flags controlling behavior of the reset
 *
 * Flags:
 *   ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
 *   ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
 *   ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
 *
 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
 * the VF resets successfully. Returns an error code if the VF fails to
 * rebuild.
 */
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int err = 0;
	bool rsd;

	dev = ice_pf_to_dev(pf);

	if (flags & ICE_VF_RESET_NOTIFY)
		ice_notify_vf_reset(vf);

	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return 0;
	}

	if (ice_is_vf_disabled(vf)) {
		vsi = ice_get_vf_vsi(vf);
		if (!vsi) {
			dev_dbg(dev, "VF is already removed\n");
			return -EINVAL;
		}
		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);

		if (ice_vsi_is_rx_queue_active(vsi))
			ice_vsi_stop_all_rx_rings(vsi);

		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		return 0;
	}

	if (flags & ICE_VF_RESET_LOCK)
		mutex_lock(&vf->cfg_lock);
	else
		lockdep_assert_held(&vf->cfg_lock);

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);

	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EIO;
		goto out_unlock;
	}

	ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	rsd = vf->vf_ops->poll_reset_status(vf);

	/* Display a warning if VF didn't manage to reset in time, but need to
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	vf->driver_caps = 0;
	ice_vc_set_default_allowlist(vf);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	ice_vf_clear_all_promisc_modes(vf, vsi);

	ice_vf_fdir_exit(vf);
	ice_vf_fdir_init(vf);
	/* clean VF control VSI when resetting VF since it should be setup
	 * only when VF creates its first FDIR rule.
	 */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	ice_vf_pre_vsi_rebuild(vf);

	if (ice_vf_recreate_vsi(vf)) {
		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
			vf->vf_id);
		err = -EFAULT;
		goto out_unlock;
	}

	ice_vf_post_vsi_rebuild(vf);
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EINVAL;
		goto out_unlock;
	}

	ice_eswitch_update_repr(vsi);

	/* if the VF has been reset allow it to come up again */
	ice_mbx_clear_malvf(&vf->mbx_info);

out_unlock:
	if (flags & ICE_VF_RESET_LOCK)
		mutex_unlock(&vf->cfg_lock);

	return err;
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_set_vf_state_dis - Set VF state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_dis(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	vf->vf_ops->clear_reset_state(vf);
}

/* Private functions only accessed from other virtualization files */

/**
 * ice_initialize_vf_entry - Initialize a VF entry
 * @vf: pointer to the VF structure
 */
void ice_initialize_vf_entry(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vfs *vfs;

	vfs = &pf->vfs;

	/* assign default capabilities */
	vf->spoofchk = true;
	vf->num_vf_qs = vfs->num_qps_per;
	ice_vc_set_default_allowlist(vf);
	ice_virtchnl_set_dflt_ops(vf);

	/* ctrl_vsi_idx will be set to a valid value only when iAVF
	 * creates its first fdir rule.
	 */
	ice_vf_ctrl_invalidate_vsi(vf);
	ice_vf_fdir_init(vf);

	/* Initialize mailbox info for this VF */
	ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);

	mutex_init(&vf->cfg_lock);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}

/**
 * ice_err_to_virt_err - translate errors for VF return code
 * @err: error return code
 */
enum virtchnl_status_code ice_err_to_virt_err(int err)
{
	switch (err) {
	case 0:
		return VIRTCHNL_STATUS_SUCCESS;
	case -EINVAL:
	case -ENODEV:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case -ENOMEM:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case -EALREADY:
	case -EBUSY:
	case -EIO:
	case -ENOSPC:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}

/**
 * ice_check_vf_init - helper to check if VF init complete
 * @vf: the pointer to the VF to check
 */
int ice_check_vf_init(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
			vf->vf_id);
		return -EBUSY;
	}
	return 0;
}

/**
 * ice_vf_get_port_info - Get the VF's port info structure
 * @vf: VF used to get the port info structure for
 */
struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
{
	return vf->pf->hw.port_info;
}

/**
 * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
 * @vsi: the VSI to configure
 * @enable: whether to enable or disable the spoof checking
 *
 * Configure a VSI to enable (or disable) spoof checking behavior.
 */
static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
{
	struct ice_vsi_ctx *ctx;
	int err;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->info.sec_flags = vsi->info.sec_flags;
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);

	if (enable)
		ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
	else
		ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;

	err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
	if (err)
		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
			enable ?
"ON" : "OFF", vsi->vsi_num, err); 874 else 875 vsi->info.sec_flags = ctx->info.sec_flags; 876 877 kfree(ctx); 878 879 return err; 880 } 881 882 /** 883 * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI 884 * @vsi: VSI to enable Tx spoof checking for 885 */ 886 static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi) 887 { 888 struct ice_vsi_vlan_ops *vlan_ops; 889 int err = 0; 890 891 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 892 893 /* Allow VF with VLAN 0 only to send all tagged traffic */ 894 if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) { 895 err = vlan_ops->ena_tx_filtering(vsi); 896 if (err) 897 return err; 898 } 899 900 return ice_cfg_mac_antispoof(vsi, true); 901 } 902 903 /** 904 * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI 905 * @vsi: VSI to disable Tx spoof checking for 906 */ 907 static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi) 908 { 909 struct ice_vsi_vlan_ops *vlan_ops; 910 int err; 911 912 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 913 914 err = vlan_ops->dis_tx_filtering(vsi); 915 if (err) 916 return err; 917 918 return ice_cfg_mac_antispoof(vsi, false); 919 } 920 921 /** 922 * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI 923 * @vsi: VSI associated to the VF 924 * @enable: whether to enable or disable the spoof checking 925 */ 926 int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable) 927 { 928 int err; 929 930 if (enable) 931 err = ice_vsi_ena_spoofchk(vsi); 932 else 933 err = ice_vsi_dis_spoofchk(vsi); 934 935 return err; 936 } 937 938 /** 939 * ice_is_vf_trusted 940 * @vf: pointer to the VF info 941 */ 942 bool ice_is_vf_trusted(struct ice_vf *vf) 943 { 944 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 945 } 946 947 /** 948 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled 949 * @vf: the VF to check 950 * 951 * Returns true if the VF has no Rx and no Tx queues enabled and returns false 952 * otherwise 953 */ 954 bool 
ice_vf_has_no_qs_ena(struct ice_vf *vf) 955 { 956 return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) && 957 !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF)); 958 } 959 960 /** 961 * ice_is_vf_link_up - check if the VF's link is up 962 * @vf: VF to check if link is up 963 */ 964 bool ice_is_vf_link_up(struct ice_vf *vf) 965 { 966 struct ice_port_info *pi = ice_vf_get_port_info(vf); 967 968 if (ice_check_vf_init(vf)) 969 return false; 970 971 if (ice_vf_has_no_qs_ena(vf)) 972 return false; 973 else if (vf->link_forced) 974 return vf->link_up; 975 else 976 return pi->phy.link_info.link_info & 977 ICE_AQ_LINK_UP; 978 } 979 980 /** 981 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value 982 * @vf: VF to configure trust setting for 983 */ 984 static void ice_vf_set_host_trust_cfg(struct ice_vf *vf) 985 { 986 if (vf->trusted) 987 set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 988 else 989 clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 990 } 991 992 /** 993 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA 994 * @vf: VF to add MAC filters for 995 * 996 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver 997 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset. 
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u8 broadcast[ETH_ALEN];
	int status;

	if (WARN_ON(!vsi))
		return -EINVAL;

	/* In switchdev mode the representor handles filtering */
	if (ice_is_eswitch_mode_switchdev(vf->pf))
		return 0;

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
			vf->vf_id, status);
		return status;
	}

	vf->num_mac++;

	if (is_valid_ether_addr(vf->hw_lan_addr)) {
		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
				&vf->hw_lan_addr[0], vf->vf_id,
				status);
			return status;
		}
		vf->num_mac++;

		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
	}

	return 0;
}

/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to add MAC filters for
 * @vsi: Pointer to VSI
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vf->pf);
	int err;

	if (ice_vf_is_port_vlan_ena(vf)) {
		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
	} else {
		err = ice_vsi_add_vlan_zero(vsi);
	}

	if (err) {
		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
			ice_vf_is_port_vlan_ena(vf) ?
			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
		return err;
	}

	/* Rx VLAN filtering failure is non-fatal: warn and continue */
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err)
		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
			 vf->vf_id, vsi->idx, err);

	return 0;
}

/**
 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
 * @vf: VF to re-apply the configuration for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * needs to re-apply the host configured Tx rate limiting configuration.
 */
static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int err;

	if (WARN_ON(!vsi))
		return -EINVAL;

	/* min/max_tx_rate are stored in Mbps; scheduler takes Kbps */
	if (vf->min_tx_rate) {
		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
				vf->min_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	if (vf->max_tx_rate) {
		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
				vf->max_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	return 0;
}

/**
 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
 * @vsi: Pointer to VSI
 *
 * This function moves VSI into corresponding scheduler aggregator node
 * based on cached value of "aggregator node info" per VSI
 */
static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;

	if (!vsi->agg_node)
		return;

	dev = ice_pf_to_dev(pf);
	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
		dev_dbg(dev,
			"agg_id %u already has reached max_num_vsis %u\n",
			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
		return;
	}

	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
				     vsi->idx, vsi->tc_cfg.ena_tc);
	if (status)
		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
			vsi->idx, vsi->agg_node->agg_id);
	else
		vsi->agg_node->num_vsis++;
}

/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 */
void
ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vf_set_host_trust_cfg(vf);

	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
			vf->vf_id);

	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
			vf->vf_id);

	/* rebuild aggregator node config for main VF VSI */
	ice_vf_rebuild_aggregator_node_cfg(vsi);
}

/**
 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
 * @vf: VF that control VSI is being invalidated on
 */
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
{
	vf->ctrl_vsi_idx = ICE_NO_VSI;
}

/**
 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
 * @vf: VF that control VSI is being released on
 */
void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
	ice_vf_ctrl_invalidate_vsi(vf);
}

/**
 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
 * @vf: VF to setup control VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
1210 */ 1211 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) 1212 { 1213 struct ice_vsi_cfg_params params = {}; 1214 struct ice_pf *pf = vf->pf; 1215 struct ice_vsi *vsi; 1216 1217 params.type = ICE_VSI_CTRL; 1218 params.pi = ice_vf_get_port_info(vf); 1219 params.vf = vf; 1220 params.flags = ICE_VSI_FLAG_INIT; 1221 1222 vsi = ice_vsi_setup(pf, ¶ms); 1223 if (!vsi) { 1224 dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n"); 1225 ice_vf_ctrl_invalidate_vsi(vf); 1226 } 1227 1228 return vsi; 1229 } 1230 1231 /** 1232 * ice_vf_init_host_cfg - Initialize host admin configuration 1233 * @vf: VF to initialize 1234 * @vsi: the VSI created at initialization 1235 * 1236 * Initialize the VF host configuration. Called during VF creation to setup 1237 * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It 1238 * should only be called during VF creation. 1239 */ 1240 int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi) 1241 { 1242 struct ice_vsi_vlan_ops *vlan_ops; 1243 struct ice_pf *pf = vf->pf; 1244 u8 broadcast[ETH_ALEN]; 1245 struct device *dev; 1246 int err; 1247 1248 dev = ice_pf_to_dev(pf); 1249 1250 err = ice_vsi_add_vlan_zero(vsi); 1251 if (err) { 1252 dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n", 1253 vf->vf_id); 1254 return err; 1255 } 1256 1257 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 1258 err = vlan_ops->ena_rx_filtering(vsi); 1259 if (err) { 1260 dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n", 1261 vf->vf_id); 1262 return err; 1263 } 1264 1265 eth_broadcast_addr(broadcast); 1266 err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); 1267 if (err) { 1268 dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n", 1269 vf->vf_id, err); 1270 return err; 1271 } 1272 1273 vf->num_mac = 1; 1274 1275 err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk); 1276 if (err) { 1277 dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n", 1278 vf->vf_id); 1279 return err; 1280 } 
1281 1282 return 0; 1283 } 1284 1285 /** 1286 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access 1287 * @vf: VF to remove access to VSI for 1288 */ 1289 void ice_vf_invalidate_vsi(struct ice_vf *vf) 1290 { 1291 vf->lan_vsi_idx = ICE_NO_VSI; 1292 vf->lan_vsi_num = ICE_NO_VSI; 1293 } 1294 1295 /** 1296 * ice_vf_vsi_release - Release the VF VSI and invalidate indexes 1297 * @vf: pointer to the VF structure 1298 * 1299 * Release the VF associated with this VSI and then invalidate the VSI 1300 * indexes. 1301 */ 1302 void ice_vf_vsi_release(struct ice_vf *vf) 1303 { 1304 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 1305 1306 if (WARN_ON(!vsi)) 1307 return; 1308 1309 ice_vsi_release(vsi); 1310 ice_vf_invalidate_vsi(vf); 1311 } 1312 1313 /** 1314 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication 1315 * @vf: VF to set in initialized state 1316 * 1317 * After this function the VF will be ready to receive/handle the 1318 * VIRTCHNL_OP_GET_VF_RESOURCES message 1319 */ 1320 void ice_vf_set_initialized(struct ice_vf *vf) 1321 { 1322 ice_set_vf_state_qs_dis(vf); 1323 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); 1324 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); 1325 clear_bit(ICE_VF_STATE_DIS, vf->vf_states); 1326 set_bit(ICE_VF_STATE_INIT, vf->vf_states); 1327 memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps)); 1328 } 1329 1330 /** 1331 * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer 1332 * @pf: the PF private structure 1333 * @vsi: pointer to the VSI 1334 * 1335 * Return first found VF control VSI other than the vsi 1336 * passed by parameter. This function is used to determine 1337 * whether new resources have to be allocated for control VSI 1338 * or they can be shared with existing one. 1339 * 1340 * Return found VF control VSI pointer other itself. Return 1341 * NULL Otherwise. 
1342 * 1343 */ 1344 struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi) 1345 { 1346 struct ice_vsi *ctrl_vsi = NULL; 1347 struct ice_vf *vf; 1348 unsigned int bkt; 1349 1350 rcu_read_lock(); 1351 ice_for_each_vf_rcu(pf, bkt, vf) { 1352 if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { 1353 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; 1354 break; 1355 } 1356 } 1357 1358 rcu_read_unlock(); 1359 return ctrl_vsi; 1360 } 1361