1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (C) 2022, Intel Corporation. */ 3 4 #include "ice_vf_lib_private.h" 5 #include "ice.h" 6 #include "ice_lib.h" 7 #include "ice_fltr.h" 8 #include "ice_virtchnl_allowlist.h" 9 10 /* Public functions which may be accessed by all driver files */ 11 12 /** 13 * ice_get_vf_by_id - Get pointer to VF by ID 14 * @pf: the PF private structure 15 * @vf_id: the VF ID to locate 16 * 17 * Locate and return a pointer to the VF structure associated with a given ID. 18 * Returns NULL if the ID does not have a valid VF structure associated with 19 * it. 20 * 21 * This function takes a reference to the VF, which must be released by 22 * calling ice_put_vf() once the caller is finished accessing the VF structure 23 * returned. 24 */ 25 struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id) 26 { 27 struct ice_vf *vf; 28 29 rcu_read_lock(); 30 hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) { 31 if (vf->vf_id == vf_id) { 32 struct ice_vf *found; 33 34 if (kref_get_unless_zero(&vf->refcnt)) 35 found = vf; 36 else 37 found = NULL; 38 39 rcu_read_unlock(); 40 return found; 41 } 42 } 43 rcu_read_unlock(); 44 45 return NULL; 46 } 47 48 /** 49 * ice_release_vf - Release VF associated with a refcount 50 * @ref: the kref decremented to zero 51 * 52 * Callback function for kref_put to release a VF once its reference count has 53 * hit zero. 54 */ 55 static void ice_release_vf(struct kref *ref) 56 { 57 struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt); 58 59 vf->vf_ops->free(vf); 60 } 61 62 /** 63 * ice_put_vf - Release a reference to a VF 64 * @vf: the VF structure to decrease reference count on 65 * 66 * Decrease the reference count for a VF, and free the entry if it is no 67 * longer in use. 68 * 69 * This must be called after ice_get_vf_by_id() once the reference to the VF 70 * structure is no longer used. Otherwise, the VF structure will never be 71 * freed. 72 */ 73 void ice_put_vf(struct ice_vf *vf) 74 { 75 kref_put(&vf->refcnt, ice_release_vf); 76 } 77 78 /** 79 * ice_has_vfs - Return true if the PF has any associated VFs 80 * @pf: the PF private structure 81 * 82 * Return whether or not the PF has any allocated VFs. 83 * 84 * Note that this function only guarantees that there are no VFs at the point 85 * of calling it. It does not guarantee that no more VFs will be added. 86 */ 87 bool ice_has_vfs(struct ice_pf *pf) 88 { 89 /* A simple check that the hash table is not empty does not require 90 * the mutex or rcu_read_lock. 91 */ 92 return !hash_empty(pf->vfs.table); 93 } 94 95 /** 96 * ice_get_num_vfs - Get number of allocated VFs 97 * @pf: the PF private structure 98 * 99 * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed 100 * to be contiguous. Do not assume that a VF ID is guaranteed to be less than 101 * the output of this function. 102 */ 103 u16 ice_get_num_vfs(struct ice_pf *pf) 104 { 105 struct ice_vf *vf; 106 unsigned int bkt; 107 u16 num_vfs = 0; 108 109 rcu_read_lock(); 110 ice_for_each_vf_rcu(pf, bkt, vf) 111 num_vfs++; 112 rcu_read_unlock(); 113 114 return num_vfs; 115 } 116 117 /** 118 * ice_get_vf_vsi - get VF's VSI based on the stored index 119 * @vf: VF used to get VSI 120 */ 121 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) 122 { 123 if (vf->lan_vsi_idx == ICE_NO_VSI) 124 return NULL; 125 126 return vf->pf->vsi[vf->lan_vsi_idx]; 127 } 128 129 /** 130 * ice_is_vf_disabled 131 * @vf: pointer to the VF info 132 * 133 * If the PF has been disabled, there is no need resetting VF until PF is 134 * active again. Similarly, if the VF has been disabled, this means something 135 * else is resetting the VF, so we shouldn't continue. 136 * 137 * Returns true if the caller should consider the VF as disabled whether 138 * because that single VF is explicitly disabled or because the PF is 139 * currently disabled. 140 */ 141 bool ice_is_vf_disabled(struct ice_vf *vf) 142 { 143 struct ice_pf *pf = vf->pf; 144 145 return (test_bit(ICE_VF_DIS, pf->state) || 146 test_bit(ICE_VF_STATE_DIS, vf->vf_states)); 147 } 148 149 /** 150 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset 151 * @vf: The VF being resseting 152 * 153 * The max poll time is about ~800ms, which is about the maximum time it takes 154 * for a VF to be reset and/or a VF driver to be removed. 155 */ 156 static void ice_wait_on_vf_reset(struct ice_vf *vf) 157 { 158 int i; 159 160 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) { 161 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) 162 break; 163 msleep(ICE_MAX_VF_RESET_SLEEP_MS); 164 } 165 } 166 167 /** 168 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried 169 * @vf: VF to check if it's ready to be configured/queried 170 * 171 * The purpose of this function is to make sure the VF is not in reset, not 172 * disabled, and initialized so it can be configured and/or queried by a host 173 * administrator. 174 */ 175 int ice_check_vf_ready_for_cfg(struct ice_vf *vf) 176 { 177 ice_wait_on_vf_reset(vf); 178 179 if (ice_is_vf_disabled(vf)) 180 return -EINVAL; 181 182 if (ice_check_vf_init(vf)) 183 return -EBUSY; 184 185 return 0; 186 } 187 188 /** 189 * ice_trigger_vf_reset - Reset a VF on HW 190 * @vf: pointer to the VF structure 191 * @is_vflr: true if VFLR was issued, false if not 192 * @is_pfr: true if the reset was triggered due to a previous PFR 193 * 194 * Trigger hardware to start a reset for a particular VF. Expects the caller 195 * to wait the proper amount of time to allow hardware to reset the VF before 196 * it cleans up and restores VF functionality. 197 */ 198 static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) 199 { 200 /* Inform VF that it is no longer active, as a warning */ 201 clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); 202 203 /* Disable VF's configuration API during reset. The flag is re-enabled 204 * when it's safe again to access VF's VSI. 205 */ 206 clear_bit(ICE_VF_STATE_INIT, vf->vf_states); 207 208 /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver 209 * needs to clear them in the case of VFR/VFLR. If this is done for 210 * PFR, it can mess up VF resets because the VF driver may already 211 * have started cleanup by the time we get here. 212 */ 213 if (!is_pfr) 214 vf->vf_ops->clear_mbx_register(vf); 215 216 vf->vf_ops->trigger_reset_register(vf, is_vflr); 217 } 218 219 static void ice_vf_clear_counters(struct ice_vf *vf) 220 { 221 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 222 223 if (vsi) 224 vsi->num_vlan = 0; 225 226 vf->num_mac = 0; 227 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); 228 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); 229 } 230 231 /** 232 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild 233 * @vf: VF to perform pre VSI rebuild tasks 234 * 235 * These tasks are items that don't need to be amortized since they are most 236 * likely called in a for loop with all VF(s) in the reset_all_vfs() case. 237 */ 238 static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf) 239 { 240 /* Close any IRQ mapping now */ 241 if (vf->vf_ops->irq_close) 242 vf->vf_ops->irq_close(vf); 243 244 ice_vf_clear_counters(vf); 245 vf->vf_ops->clear_reset_trigger(vf); 246 } 247 248 /** 249 * ice_vf_recreate_vsi - Release and re-create the VF's VSI 250 * @vf: VF to recreate the VSI for 251 * 252 * This is only called when a single VF is being reset (i.e. VVF, VFLR, host 253 * VF configuration change, etc) 254 * 255 * It releases and then re-creates a new VSI. 256 */ 257 static int ice_vf_recreate_vsi(struct ice_vf *vf) 258 { 259 struct ice_pf *pf = vf->pf; 260 int err; 261 262 ice_vf_vsi_release(vf); 263 264 err = vf->vf_ops->create_vsi(vf); 265 if (err) { 266 dev_err(ice_pf_to_dev(pf), 267 "Failed to recreate the VF%u's VSI, error %d\n", 268 vf->vf_id, err); 269 return err; 270 } 271 272 return 0; 273 } 274 275 /** 276 * ice_vf_rebuild_vsi - rebuild the VF's VSI 277 * @vf: VF to rebuild the VSI for 278 * 279 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the 280 * host, PFR, CORER, etc.). 281 * 282 * It reprograms the VSI configuration back into hardware. 283 */ 284 static int ice_vf_rebuild_vsi(struct ice_vf *vf) 285 { 286 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 287 struct ice_pf *pf = vf->pf; 288 289 if (WARN_ON(!vsi)) 290 return -EINVAL; 291 292 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) { 293 dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n", 294 vf->vf_id); 295 return -EIO; 296 } 297 /* vsi->idx will remain the same in this case so don't update 298 * vf->lan_vsi_idx 299 */ 300 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); 301 vf->lan_vsi_num = vsi->vsi_num; 302 303 return 0; 304 } 305 306 /** 307 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN 308 * @vf: VF to add MAC filters for 309 * @vsi: Pointer to VSI 310 * 311 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver 312 * always re-adds either a VLAN 0 or port VLAN based filter after reset. 313 */ 314 static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) 315 { 316 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 317 struct device *dev = ice_pf_to_dev(vf->pf); 318 int err; 319 320 if (ice_vf_is_port_vlan_ena(vf)) { 321 err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info); 322 if (err) { 323 dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n", 324 vf->vf_id, err); 325 return err; 326 } 327 328 err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info); 329 } else { 330 err = ice_vsi_add_vlan_zero(vsi); 331 } 332 333 if (err) { 334 dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n", 335 ice_vf_is_port_vlan_ena(vf) ? 336 ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err); 337 return err; 338 } 339 340 err = vlan_ops->ena_rx_filtering(vsi); 341 if (err) 342 dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n", 343 vf->vf_id, vsi->idx, err); 344 345 return 0; 346 } 347 348 /** 349 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration 350 * @vf: VF to re-apply the configuration for 351 * 352 * Called after a VF VSI has been re-added/rebuild during reset. The PF driver 353 * needs to re-apply the host configured Tx rate limiting configuration. 354 */ 355 static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf) 356 { 357 struct device *dev = ice_pf_to_dev(vf->pf); 358 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 359 int err; 360 361 if (WARN_ON(!vsi)) 362 return -EINVAL; 363 364 if (vf->min_tx_rate) { 365 err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000); 366 if (err) { 367 dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n", 368 vf->min_tx_rate, vf->vf_id, err); 369 return err; 370 } 371 } 372 373 if (vf->max_tx_rate) { 374 err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000); 375 if (err) { 376 dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n", 377 vf->max_tx_rate, vf->vf_id, err); 378 return err; 379 } 380 } 381 382 return 0; 383 } 384 385 /** 386 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value 387 * @vf: VF to configure trust setting for 388 */ 389 static void ice_vf_set_host_trust_cfg(struct ice_vf *vf) 390 { 391 assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted); 392 } 393 394 /** 395 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA 396 * @vf: VF to add MAC filters for 397 * 398 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver 399 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset. 400 */ 401 static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) 402 { 403 struct device *dev = ice_pf_to_dev(vf->pf); 404 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 405 u8 broadcast[ETH_ALEN]; 406 int status; 407 408 if (WARN_ON(!vsi)) 409 return -EINVAL; 410 411 if (ice_is_eswitch_mode_switchdev(vf->pf)) 412 return 0; 413 414 eth_broadcast_addr(broadcast); 415 status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); 416 if (status) { 417 dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n", 418 vf->vf_id, status); 419 return status; 420 } 421 422 vf->num_mac++; 423 424 if (is_valid_ether_addr(vf->hw_lan_addr)) { 425 status = ice_fltr_add_mac(vsi, vf->hw_lan_addr, 426 ICE_FWD_TO_VSI); 427 if (status) { 428 dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n", 429 &vf->hw_lan_addr[0], vf->vf_id, 430 status); 431 return status; 432 } 433 vf->num_mac++; 434 435 ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr); 436 } 437 438 return 0; 439 } 440 441 /** 442 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config 443 * @vsi: Pointer to VSI 444 * 445 * This function moves VSI into corresponding scheduler aggregator node 446 * based on cached value of "aggregator node info" per VSI 447 */ 448 static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) 449 { 450 struct ice_pf *pf = vsi->back; 451 struct device *dev; 452 int status; 453 454 if (!vsi->agg_node) 455 return; 456 457 dev = ice_pf_to_dev(pf); 458 if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { 459 dev_dbg(dev, 460 "agg_id %u already has reached max_num_vsis %u\n", 461 vsi->agg_node->agg_id, vsi->agg_node->num_vsis); 462 return; 463 } 464 465 status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id, 466 vsi->idx, vsi->tc_cfg.ena_tc); 467 if (status) 468 dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node", 469 vsi->idx, vsi->agg_node->agg_id); 470 else 471 vsi->agg_node->num_vsis++; 472 } 473 474 /** 475 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset 476 * @vf: VF to rebuild host configuration on 477 */ 478 static void ice_vf_rebuild_host_cfg(struct ice_vf *vf) 479 { 480 struct device *dev = ice_pf_to_dev(vf->pf); 481 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 482 483 if (WARN_ON(!vsi)) 484 return; 485 486 ice_vf_set_host_trust_cfg(vf); 487 488 if (ice_vf_rebuild_host_mac_cfg(vf)) 489 dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n", 490 vf->vf_id); 491 492 if (ice_vf_rebuild_host_vlan_cfg(vf, vsi)) 493 dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n", 494 vf->vf_id); 495 496 if (ice_vf_rebuild_host_tx_rate_cfg(vf)) 497 dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n", 498 vf->vf_id); 499 500 if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk)) 501 dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n", 502 vf->vf_id); 503 504 /* rebuild aggregator node config for main VF VSI */ 505 ice_vf_rebuild_aggregator_node_cfg(vsi); 506 } 507 508 /** 509 * ice_set_vf_state_qs_dis - Set VF queues state to disabled 510 * @vf: pointer to the VF structure 511 */ 512 static void ice_set_vf_state_qs_dis(struct ice_vf *vf) 513 { 514 /* Clear Rx/Tx enabled queues flag */ 515 bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF); 516 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); 517 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); 518 } 519 520 /** 521 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication 522 * @vf: VF to set in initialized state 523 * 524 * After this function the VF will be ready to receive/handle the 525 * VIRTCHNL_OP_GET_VF_RESOURCES message 526 */ 527 static void ice_vf_set_initialized(struct ice_vf *vf) 528 { 529 ice_set_vf_state_qs_dis(vf); 530 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); 531 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); 532 clear_bit(ICE_VF_STATE_DIS, vf->vf_states); 533 set_bit(ICE_VF_STATE_INIT, vf->vf_states); 534 memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps)); 535 } 536 537 /** 538 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild 539 * @vf: the VF being reset 540 * 541 * Perform reset tasks which must occur after the VSI has been re-created or 542 * rebuilt during a VF reset. 543 */ 544 static void ice_vf_post_vsi_rebuild(struct ice_vf *vf) 545 { 546 ice_vf_rebuild_host_cfg(vf); 547 ice_vf_set_initialized(vf); 548 549 vf->vf_ops->post_vsi_rebuild(vf); 550 } 551 552 /** 553 * ice_is_any_vf_in_unicast_promisc - check if any VF(s) 554 * are in unicast promiscuous mode 555 * @pf: PF structure for accessing VF(s) 556 * 557 * Return false if no VF(s) are in unicast promiscuous mode, 558 * else return true 559 */ 560 bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf) 561 { 562 bool is_vf_promisc = false; 563 struct ice_vf *vf; 564 unsigned int bkt; 565 566 rcu_read_lock(); 567 ice_for_each_vf_rcu(pf, bkt, vf) { 568 /* found a VF that has promiscuous mode configured */ 569 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) { 570 is_vf_promisc = true; 571 break; 572 } 573 } 574 rcu_read_unlock(); 575 576 return is_vf_promisc; 577 } 578 579 /** 580 * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes 581 * @vf: the VF pointer 582 * @vsi: the VSI to configure 583 * @ucast_m: promiscuous mask to apply to unicast 584 * @mcast_m: promiscuous mask to apply to multicast 585 * 586 * Decide which mask should be used for unicast and multicast filter, 587 * based on presence of VLANs 588 */ 589 void 590 ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi, 591 u8 *ucast_m, u8 *mcast_m) 592 { 593 if (ice_vf_is_port_vlan_ena(vf) || 594 ice_vsi_has_non_zero_vlans(vsi)) { 595 *mcast_m = ICE_MCAST_VLAN_PROMISC_BITS; 596 *ucast_m = ICE_UCAST_VLAN_PROMISC_BITS; 597 } else { 598 *mcast_m = ICE_MCAST_PROMISC_BITS; 599 *ucast_m = ICE_UCAST_PROMISC_BITS; 600 } 601 } 602 603 /** 604 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI 605 * @vf: the VF pointer 606 * @vsi: the VSI to configure 607 * 608 * Clear all promiscuous/allmulticast filters for a VF 609 */ 610 static int 611 ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi) 612 { 613 struct ice_pf *pf = vf->pf; 614 u8 ucast_m, mcast_m; 615 int ret = 0; 616 617 ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m); 618 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) { 619 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) { 620 if (ice_is_dflt_vsi_in_use(vsi->port_info)) 621 ret = ice_clear_dflt_vsi(vsi); 622 } else { 623 ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m); 624 } 625 626 if (ret) { 627 dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n"); 628 } else { 629 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); 630 dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n"); 631 } 632 } 633 634 if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) { 635 ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m); 636 if (ret) { 637 dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n"); 638 } else { 639 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); 640 dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n"); 641 } 642 } 643 return ret; 644 } 645 646 /** 647 * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI 648 * @vf: the VF to configure 649 * @vsi: the VF's VSI 650 * @promisc_m: the promiscuous mode to enable 651 */ 652 int 653 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) 654 { 655 struct ice_hw *hw = &vsi->back->hw; 656 int status; 657 658 if (ice_vf_is_port_vlan_ena(vf)) 659 status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 660 ice_vf_get_port_vlan_id(vf)); 661 else if (ice_vsi_has_non_zero_vlans(vsi)) 662 status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m); 663 else 664 status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0); 665 666 if (status && status != -EEXIST) { 667 dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", 668 vf->vf_id, status); 669 return status; 670 } 671 672 return 0; 673 } 674 675 /** 676 * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI 677 * @vf: the VF to configure 678 * @vsi: the VF's VSI 679 * @promisc_m: the promiscuous mode to disable 680 */ 681 int 682 ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) 683 { 684 struct ice_hw *hw = &vsi->back->hw; 685 int status; 686 687 if (ice_vf_is_port_vlan_ena(vf)) 688 status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 689 ice_vf_get_port_vlan_id(vf)); 690 else if (ice_vsi_has_non_zero_vlans(vsi)) 691 status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m); 692 else 693 status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0); 694 695 if (status && status != -ENOENT) { 696 dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", 697 vf->vf_id, status); 698 return status; 699 } 700 701 return 0; 702 } 703 704 /** 705 * ice_reset_all_vfs - reset all allocated VFs in one go 706 * @pf: pointer to the PF structure 707 * 708 * Reset all VFs at once, in response to a PF or other device reset. 709 * 710 * First, tell the hardware to reset each VF, then do all the waiting in one 711 * chunk, and finally finish restoring each VF after the wait. This is useful 712 * during PF routines which need to reset all VFs, as otherwise it must perform 713 * these resets in a serialized fashion. 714 */ 715 void ice_reset_all_vfs(struct ice_pf *pf) 716 { 717 struct device *dev = ice_pf_to_dev(pf); 718 struct ice_hw *hw = &pf->hw; 719 struct ice_vf *vf; 720 unsigned int bkt; 721 722 /* If we don't have any VFs, then there is nothing to reset */ 723 if (!ice_has_vfs(pf)) 724 return; 725 726 mutex_lock(&pf->vfs.table_lock); 727 728 /* clear all malicious info if the VFs are getting reset */ 729 ice_for_each_vf(pf, bkt, vf) 730 ice_mbx_clear_malvf(&vf->mbx_info); 731 732 /* If VFs have been disabled, there is no need to reset */ 733 if (test_and_set_bit(ICE_VF_DIS, pf->state)) { 734 mutex_unlock(&pf->vfs.table_lock); 735 return; 736 } 737 738 /* Begin reset on all VFs at once */ 739 ice_for_each_vf(pf, bkt, vf) 740 ice_trigger_vf_reset(vf, true, true); 741 742 /* HW requires some time to make sure it can flush the FIFO for a VF 743 * when it resets it. Now that we've triggered all of the VFs, iterate 744 * the table again and wait for each VF to complete. 745 */ 746 ice_for_each_vf(pf, bkt, vf) { 747 if (!vf->vf_ops->poll_reset_status(vf)) { 748 /* Display a warning if at least one VF didn't manage 749 * to reset in time, but continue on with the 750 * operation. 751 */ 752 dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id); 753 break; 754 } 755 } 756 757 /* free VF resources to begin resetting the VSI state */ 758 ice_for_each_vf(pf, bkt, vf) { 759 mutex_lock(&vf->cfg_lock); 760 761 vf->driver_caps = 0; 762 ice_vc_set_default_allowlist(vf); 763 764 ice_vf_fdir_exit(vf); 765 ice_vf_fdir_init(vf); 766 /* clean VF control VSI when resetting VFs since it should be 767 * setup only when VF creates its first FDIR rule. 768 */ 769 if (vf->ctrl_vsi_idx != ICE_NO_VSI) 770 ice_vf_ctrl_invalidate_vsi(vf); 771 772 ice_vf_pre_vsi_rebuild(vf); 773 ice_vf_rebuild_vsi(vf); 774 ice_vf_post_vsi_rebuild(vf); 775 776 mutex_unlock(&vf->cfg_lock); 777 } 778 779 if (ice_is_eswitch_mode_switchdev(pf)) 780 if (ice_eswitch_rebuild(pf)) 781 dev_warn(dev, "eswitch rebuild failed\n"); 782 783 ice_flush(hw); 784 clear_bit(ICE_VF_DIS, pf->state); 785 786 mutex_unlock(&pf->vfs.table_lock); 787 } 788 789 /** 790 * ice_notify_vf_reset - Notify VF of a reset event 791 * @vf: pointer to the VF structure 792 */ 793 static void ice_notify_vf_reset(struct ice_vf *vf) 794 { 795 struct ice_hw *hw = &vf->pf->hw; 796 struct virtchnl_pf_event pfe; 797 798 /* Bail out if VF is in disabled state, neither initialized, nor active 799 * state - otherwise proceed with notifications 800 */ 801 if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && 802 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) || 803 test_bit(ICE_VF_STATE_DIS, vf->vf_states)) 804 return; 805 806 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; 807 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; 808 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 809 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe), 810 NULL); 811 } 812 813 /** 814 * ice_reset_vf - Reset a particular VF 815 * @vf: pointer to the VF structure 816 * @flags: flags controlling behavior of the reset 817 * 818 * Flags: 819 * ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event 820 * ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset 821 * ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting 822 * 823 * Returns 0 if the VF is currently in reset, if resets are disabled, or if 824 * the VF resets successfully. Returns an error code if the VF fails to 825 * rebuild. 826 */ 827 int ice_reset_vf(struct ice_vf *vf, u32 flags) 828 { 829 struct ice_pf *pf = vf->pf; 830 struct ice_vsi *vsi; 831 struct device *dev; 832 int err = 0; 833 bool rsd; 834 835 dev = ice_pf_to_dev(pf); 836 837 if (flags & ICE_VF_RESET_NOTIFY) 838 ice_notify_vf_reset(vf); 839 840 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { 841 dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n", 842 vf->vf_id); 843 return 0; 844 } 845 846 if (flags & ICE_VF_RESET_LOCK) 847 mutex_lock(&vf->cfg_lock); 848 else 849 lockdep_assert_held(&vf->cfg_lock); 850 851 if (ice_is_vf_disabled(vf)) { 852 vsi = ice_get_vf_vsi(vf); 853 if (!vsi) { 854 dev_dbg(dev, "VF is already removed\n"); 855 err = -EINVAL; 856 goto out_unlock; 857 } 858 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); 859 860 if (ice_vsi_is_rx_queue_active(vsi)) 861 ice_vsi_stop_all_rx_rings(vsi); 862 863 dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", 864 vf->vf_id); 865 goto out_unlock; 866 } 867 868 /* Set VF disable bit state here, before triggering reset */ 869 set_bit(ICE_VF_STATE_DIS, vf->vf_states); 870 ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false); 871 872 vsi = ice_get_vf_vsi(vf); 873 if (WARN_ON(!vsi)) { 874 err = -EIO; 875 goto out_unlock; 876 } 877 878 ice_dis_vf_qs(vf); 879 880 /* Call Disable LAN Tx queue AQ whether or not queues are 881 * enabled. This is needed for successful completion of VFR. 882 */ 883 ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, 884 NULL, vf->vf_ops->reset_type, vf->vf_id, NULL); 885 886 /* poll VPGEN_VFRSTAT reg to make sure 887 * that reset is complete 888 */ 889 rsd = vf->vf_ops->poll_reset_status(vf); 890 891 /* Display a warning if VF didn't manage to reset in time, but need to 892 * continue on with the operation. 893 */ 894 if (!rsd) 895 dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id); 896 897 vf->driver_caps = 0; 898 ice_vc_set_default_allowlist(vf); 899 900 /* disable promiscuous modes in case they were enabled 901 * ignore any error if disabling process failed 902 */ 903 ice_vf_clear_all_promisc_modes(vf, vsi); 904 905 ice_vf_fdir_exit(vf); 906 ice_vf_fdir_init(vf); 907 /* clean VF control VSI when resetting VF since it should be setup 908 * only when VF creates its first FDIR rule. 909 */ 910 if (vf->ctrl_vsi_idx != ICE_NO_VSI) 911 ice_vf_ctrl_vsi_release(vf); 912 913 ice_vf_pre_vsi_rebuild(vf); 914 915 if (ice_vf_recreate_vsi(vf)) { 916 dev_err(dev, "Failed to release and setup the VF%u's VSI\n", 917 vf->vf_id); 918 err = -EFAULT; 919 goto out_unlock; 920 } 921 922 ice_vf_post_vsi_rebuild(vf); 923 vsi = ice_get_vf_vsi(vf); 924 if (WARN_ON(!vsi)) { 925 err = -EINVAL; 926 goto out_unlock; 927 } 928 929 ice_eswitch_update_repr(vsi); 930 931 /* if the VF has been reset allow it to come up again */ 932 ice_mbx_clear_malvf(&vf->mbx_info); 933 934 out_unlock: 935 if (flags & ICE_VF_RESET_LOCK) 936 mutex_unlock(&vf->cfg_lock); 937 938 return err; 939 } 940 941 /** 942 * ice_set_vf_state_dis - Set VF state to disabled 943 * @vf: pointer to the VF structure 944 */ 945 void ice_set_vf_state_dis(struct ice_vf *vf) 946 { 947 ice_set_vf_state_qs_dis(vf); 948 vf->vf_ops->clear_reset_state(vf); 949 } 950 951 /* Private functions only accessed from other virtualization files */ 952 953 /** 954 * ice_initialize_vf_entry - Initialize a VF entry 955 * @vf: pointer to the VF structure 956 */ 957 void ice_initialize_vf_entry(struct ice_vf *vf) 958 { 959 struct ice_pf *pf = vf->pf; 960 struct ice_vfs *vfs; 961 962 vfs = &pf->vfs; 963 964 /* assign default capabilities */ 965 vf->spoofchk = true; 966 vf->num_vf_qs = vfs->num_qps_per; 967 ice_vc_set_default_allowlist(vf); 968 ice_virtchnl_set_dflt_ops(vf); 969 970 /* ctrl_vsi_idx will be set to a valid value only when iAVF 971 * creates its first fdir rule. 972 */ 973 ice_vf_ctrl_invalidate_vsi(vf); 974 ice_vf_fdir_init(vf); 975 976 /* Initialize mailbox info for this VF */ 977 ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info); 978 979 mutex_init(&vf->cfg_lock); 980 } 981 982 /** 983 * ice_dis_vf_qs - Disable the VF queues 984 * @vf: pointer to the VF structure 985 */ 986 void ice_dis_vf_qs(struct ice_vf *vf) 987 { 988 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 989 990 if (WARN_ON(!vsi)) 991 return; 992 993 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); 994 ice_vsi_stop_all_rx_rings(vsi); 995 ice_set_vf_state_qs_dis(vf); 996 } 997 998 /** 999 * ice_err_to_virt_err - translate errors for VF return code 1000 * @err: error return code 1001 */ 1002 enum virtchnl_status_code ice_err_to_virt_err(int err) 1003 { 1004 switch (err) { 1005 case 0: 1006 return VIRTCHNL_STATUS_SUCCESS; 1007 case -EINVAL: 1008 case -ENODEV: 1009 return VIRTCHNL_STATUS_ERR_PARAM; 1010 case -ENOMEM: 1011 return VIRTCHNL_STATUS_ERR_NO_MEMORY; 1012 case -EALREADY: 1013 case -EBUSY: 1014 case -EIO: 1015 case -ENOSPC: 1016 return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; 1017 default: 1018 return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; 1019 } 1020 } 1021 1022 /** 1023 * ice_check_vf_init - helper to check if VF init complete 1024 * @vf: the pointer to the VF to check 1025 */ 1026 int ice_check_vf_init(struct ice_vf *vf) 1027 { 1028 struct ice_pf *pf = vf->pf; 1029 1030 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { 1031 dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n", 1032 vf->vf_id); 1033 return -EBUSY; 1034 } 1035 return 0; 1036 } 1037 1038 /** 1039 * ice_vf_get_port_info - Get the VF's port info structure 1040 * @vf: VF used to get the port info structure for 1041 */ 1042 struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf) 1043 { 1044 return vf->pf->hw.port_info; 1045 } 1046 1047 /** 1048 * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior 1049 * @vsi: the VSI to configure 1050 * @enable: whether to enable or disable the spoof checking 1051 * 1052 * Configure a VSI to enable (or disable) spoof checking behavior. 1053 */ 1054 static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable) 1055 { 1056 struct ice_vsi_ctx *ctx; 1057 int err; 1058 1059 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 1060 if (!ctx) 1061 return -ENOMEM; 1062 1063 ctx->info.sec_flags = vsi->info.sec_flags; 1064 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); 1065 1066 if (enable) 1067 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; 1068 else 1069 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; 1070 1071 err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL); 1072 if (err) 1073 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n", 1074 enable ? "ON" : "OFF", vsi->vsi_num, err); 1075 else 1076 vsi->info.sec_flags = ctx->info.sec_flags; 1077 1078 kfree(ctx); 1079 1080 return err; 1081 } 1082 1083 /** 1084 * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI 1085 * @vsi: VSI to enable Tx spoof checking for 1086 */ 1087 static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi) 1088 { 1089 struct ice_vsi_vlan_ops *vlan_ops; 1090 int err = 0; 1091 1092 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 1093 1094 /* Allow VF with VLAN 0 only to send all tagged traffic */ 1095 if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) { 1096 err = vlan_ops->ena_tx_filtering(vsi); 1097 if (err) 1098 return err; 1099 } 1100 1101 return ice_cfg_mac_antispoof(vsi, true); 1102 } 1103 1104 /** 1105 * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI 1106 * @vsi: VSI to disable Tx spoof checking for 1107 */ 1108 static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi) 1109 { 1110 struct ice_vsi_vlan_ops *vlan_ops; 1111 int err; 1112 1113 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 1114 1115 err = vlan_ops->dis_tx_filtering(vsi); 1116 if (err) 1117 return err; 1118 1119 return ice_cfg_mac_antispoof(vsi, false); 1120 } 1121 1122 /** 1123 * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI 1124 * @vsi: VSI associated to the VF 1125 * @enable: whether to enable or disable the spoof checking 1126 */ 1127 int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable) 1128 { 1129 int err; 1130 1131 if (enable) 1132 err = ice_vsi_ena_spoofchk(vsi); 1133 else 1134 err = ice_vsi_dis_spoofchk(vsi); 1135 1136 return err; 1137 } 1138 1139 /** 1140 * ice_is_vf_trusted 1141 * @vf: pointer to the VF info 1142 */ 1143 bool ice_is_vf_trusted(struct ice_vf *vf) 1144 { 1145 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 1146 } 1147 1148 /** 1149 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled 1150 * @vf: the VF to check 1151 * 1152 * Returns true if the VF has no Rx and no Tx queues enabled and returns false 1153 * otherwise 1154 */ 1155 bool ice_vf_has_no_qs_ena(struct ice_vf *vf) 1156 { 1157 return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) && 1158 !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF)); 1159 } 1160 1161 /** 1162 * ice_is_vf_link_up - check if the VF's link is up 1163 * @vf: VF to check if link is up 1164 */ 1165 bool ice_is_vf_link_up(struct ice_vf *vf) 1166 { 1167 struct ice_port_info *pi = ice_vf_get_port_info(vf); 1168 1169 if (ice_check_vf_init(vf)) 1170 return false; 1171 1172 if (ice_vf_has_no_qs_ena(vf)) 1173 return false; 1174 else if (vf->link_forced) 1175 return vf->link_up; 1176 else 1177 return pi->phy.link_info.link_info & 1178 ICE_AQ_LINK_UP; 1179 } 1180 1181 /** 1182 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access 1183 * @vf: VF that control VSI is being invalidated on 1184 */ 1185 void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf) 1186 { 1187 vf->ctrl_vsi_idx = ICE_NO_VSI; 1188 } 1189 1190 /** 1191 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it 1192 * @vf: VF that control VSI is being released on 1193 */ 1194 void ice_vf_ctrl_vsi_release(struct ice_vf *vf) 1195 { 1196 ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]); 1197 ice_vf_ctrl_invalidate_vsi(vf); 1198 } 1199 1200 /** 1201 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI 1202 * @vf: VF to setup control VSI for 1203 * 1204 * Returns pointer to the successfully allocated VSI struct on success, 1205 * otherwise returns NULL on failure. 1206 */ 1207 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) 1208 { 1209 struct ice_vsi_cfg_params params = {}; 1210 struct ice_pf *pf = vf->pf; 1211 struct ice_vsi *vsi; 1212 1213 params.type = ICE_VSI_CTRL; 1214 params.pi = ice_vf_get_port_info(vf); 1215 params.vf = vf; 1216 params.flags = ICE_VSI_FLAG_INIT; 1217 1218 vsi = ice_vsi_setup(pf, ¶ms); 1219 if (!vsi) { 1220 dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n"); 1221 ice_vf_ctrl_invalidate_vsi(vf); 1222 } 1223 1224 return vsi; 1225 } 1226 1227 /** 1228 * ice_vf_init_host_cfg - Initialize host admin configuration 1229 * @vf: VF to initialize 1230 * @vsi: the VSI created at initialization 1231 * 1232 * Initialize the VF host configuration. Called during VF creation to setup 1233 * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It 1234 * should only be called during VF creation. 1235 */ 1236 int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi) 1237 { 1238 struct ice_vsi_vlan_ops *vlan_ops; 1239 struct ice_pf *pf = vf->pf; 1240 u8 broadcast[ETH_ALEN]; 1241 struct device *dev; 1242 int err; 1243 1244 dev = ice_pf_to_dev(pf); 1245 1246 err = ice_vsi_add_vlan_zero(vsi); 1247 if (err) { 1248 dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n", 1249 vf->vf_id); 1250 return err; 1251 } 1252 1253 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 1254 err = vlan_ops->ena_rx_filtering(vsi); 1255 if (err) { 1256 dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n", 1257 vf->vf_id); 1258 return err; 1259 } 1260 1261 eth_broadcast_addr(broadcast); 1262 err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); 1263 if (err) { 1264 dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n", 1265 vf->vf_id, err); 1266 return err; 1267 } 1268 1269 vf->num_mac = 1; 1270 1271 err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk); 1272 if (err) { 1273 dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n", 1274 vf->vf_id); 1275 return err; 1276 } 1277 1278 return 0; 1279 } 1280 1281 /** 1282 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access 1283 * @vf: VF to remove access to VSI for 1284 */ 1285 void ice_vf_invalidate_vsi(struct ice_vf *vf) 1286 { 1287 vf->lan_vsi_idx = ICE_NO_VSI; 1288 vf->lan_vsi_num = ICE_NO_VSI; 1289 } 1290 1291 /** 1292 * ice_vf_vsi_release - Release the VF VSI and invalidate indexes 1293 * @vf: pointer to the VF structure 1294 * 1295 * Release the VF associated with this VSI and then invalidate the VSI 1296 * indexes. 1297 */ 1298 void ice_vf_vsi_release(struct ice_vf *vf) 1299 { 1300 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 1301 1302 if (WARN_ON(!vsi)) 1303 return; 1304 1305 ice_vsi_release(vsi); 1306 ice_vf_invalidate_vsi(vf); 1307 } 1308 1309 /** 1310 * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer 1311 * @pf: the PF private structure 1312 * @vsi: pointer to the VSI 1313 * 1314 * Return first found VF control VSI other than the vsi 1315 * passed by parameter. This function is used to determine 1316 * whether new resources have to be allocated for control VSI 1317 * or they can be shared with existing one. 1318 * 1319 * Return found VF control VSI pointer other itself. Return 1320 * NULL Otherwise. 1321 * 1322 */ 1323 struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi) 1324 { 1325 struct ice_vsi *ctrl_vsi = NULL; 1326 struct ice_vf *vf; 1327 unsigned int bkt; 1328 1329 rcu_read_lock(); 1330 ice_for_each_vf_rcu(pf, bkt, vf) { 1331 if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { 1332 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; 1333 break; 1334 } 1335 } 1336 1337 rcu_read_unlock(); 1338 return ctrl_vsi; 1339 } 1340