// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022, Intel Corporation. */

#include "ice_vf_lib_private.h"
#include "ice.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_virtchnl_allowlist.h"

/* Public functions which may be accessed by all driver files */

/**
 * ice_get_vf_by_id - Get pointer to VF by ID
 * @pf: the PF private structure
 * @vf_id: the VF ID to locate
 *
 * Locate and return a pointer to the VF structure associated with a given ID.
 * Returns NULL if the ID does not have a valid VF structure associated with
 * it.
 *
 * This function takes a reference to the VF, which must be released by
 * calling ice_put_vf() once the caller is finished accessing the VF structure
 * returned.
 */
struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
{
	struct ice_vf *vf;

	rcu_read_lock();
	hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
		if (vf->vf_id == vf_id) {
			struct ice_vf *found;

			if (kref_get_unless_zero(&vf->refcnt))
				found = vf;
			else
				found = NULL;

			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * ice_release_vf - Release VF associated with a refcount
 * @ref: the kref decremented to zero
 *
 * Callback function for kref_put to release a VF once its reference count has
 * hit zero.
 */
static void ice_release_vf(struct kref *ref)
{
	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);

	vf->vf_ops->free(vf);
}

/**
 * ice_put_vf - Release a reference to a VF
 * @vf: the VF structure to decrease reference count on
 *
 * Decrease the reference count for a VF, and free the entry if it is no
 * longer in use.
 *
 * This must be called after ice_get_vf_by_id() once the reference to the VF
 * structure is no longer used. Otherwise, the VF structure will never be
 * freed.
 */
void ice_put_vf(struct ice_vf *vf)
{
	kref_put(&vf->refcnt, ice_release_vf);
}

/**
 * ice_has_vfs - Return true if the PF has any associated VFs
 * @pf: the PF private structure
 *
 * Return whether or not the PF has any allocated VFs.
 *
 * Note that this function only guarantees that there are no VFs at the point
 * of calling it. It does not guarantee that no more VFs will be added.
 */
bool ice_has_vfs(struct ice_pf *pf)
{
	/* A simple check that the hash table is not empty does not require
	 * the mutex or rcu_read_lock.
	 */
	return !hash_empty(pf->vfs.table);
}

/**
 * ice_get_num_vfs - Get number of allocated VFs
 * @pf: the PF private structure
 *
 * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
 * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
 * the output of this function.
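 *
 * A minimal usage sketch (illustrative only, not driver code; "pf" is
 * assumed to be a valid PF private structure):
 *
 *	u16 nvfs = ice_get_num_vfs(pf);
 *
 *	if (nvfs)
 *		dev_dbg(ice_pf_to_dev(pf), "%u VFs currently allocated\n",
 *			nvfs);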
 */
u16 ice_get_num_vfs(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	u16 num_vfs = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		num_vfs++;
	rcu_read_unlock();

	return num_vfs;
}

/**
 * ice_get_vf_vsi - get VF's VSI based on the stored index
 * @vf: VF used to get VSI
 */
struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
{
	if (vf->lan_vsi_idx == ICE_NO_VSI)
		return NULL;

	return vf->pf->vsi[vf->lan_vsi_idx];
}

/**
 * ice_is_vf_disabled
 * @vf: pointer to the VF info
 *
 * If the PF has been disabled, there is no need to reset the VF until the PF
 * is active again. Similarly, if the VF has been disabled, this means
 * something else is resetting the VF, so we shouldn't continue.
 *
 * Returns true if the caller should consider the VF as disabled whether
 * because that single VF is explicitly disabled or because the PF is
 * currently disabled.
 */
bool ice_is_vf_disabled(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	return (test_bit(ICE_VF_DIS, pf->state) ||
		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
}

/**
 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
 * @vf: the VF being reset
 *
 * The max poll time is about ~800ms, which is about the maximum time it takes
 * for a VF to be reset and/or a VF driver to be removed.
 */
static void ice_wait_on_vf_reset(struct ice_vf *vf)
{
	int i;

	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
			break;
		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
	}
}

/**
 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
 * @vf: VF to check if it's ready to be configured/queried
 *
 * The purpose of this function is to make sure the VF is not in reset, not
 * disabled, and initialized so it can be configured and/or queried by a host
 * administrator.
 */
int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
{
	ice_wait_on_vf_reset(vf);

	if (ice_is_vf_disabled(vf))
		return -EINVAL;

	if (ice_check_vf_init(vf))
		return -EBUSY;

	return 0;
}

/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
	 * needs to clear them in the case of VFR/VFLR. If this is done for
	 * PFR, it can mess up VF resets because the VF driver may already
	 * have started cleanup by the time we get here.
	 */
	if (!is_pfr)
		vf->vf_ops->clear_mbx_register(vf);

	vf->vf_ops->trigger_reset_register(vf, is_vflr);
}

static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (vsi)
		vsi->num_vlan = 0;

	vf->num_mac = 0;
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}

/**
 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 * @vf: VF to perform pre VSI rebuild tasks
 *
 * These tasks are items that don't need to be amortized since they are most
 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
	/* Close any IRQ mapping now */
	if (vf->vf_ops->irq_close)
		vf->vf_ops->irq_close(vf);

	ice_vf_clear_counters(vf);
	vf->vf_ops->clear_reset_trigger(vf);
}

/**
 * ice_vf_recreate_vsi - Release and re-create the VF's VSI
 * @vf: VF to recreate the VSI for
 *
 * This is only called when a single VF is being reset (i.e. VFR, VFLR, host
 * VF configuration change, etc.).
 *
 * It releases and then re-creates a new VSI.
 */
static int ice_vf_recreate_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int err;

	ice_vf_vsi_release(vf);

	err = vf->vf_ops->create_vsi(vf);
	if (err) {
		dev_err(ice_pf_to_dev(pf),
			"Failed to recreate the VF%u's VSI, error %d\n",
			vf->vf_id, err);
		return err;
	}

	return 0;
}

/**
 * ice_vf_rebuild_vsi - rebuild the VF's VSI
 * @vf: VF to rebuild the VSI for
 *
 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
 * host, PFR, CORER, etc.).
 *
 * It reprograms the VSI configuration back into hardware.
 */
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_pf *pf = vf->pf;

	if (WARN_ON(!vsi))
		return -EINVAL;

	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
			vf->vf_id);
		return -EIO;
	}
	/* vsi->idx will remain the same in this case so don't update
	 * vf->lan_vsi_idx
	 */
	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
	vf->lan_vsi_num = vsi->vsi_num;

	return 0;
}

/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to add VLAN filters for
 * @vsi: Pointer to VSI
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vf->pf);
	int err;

	if (ice_vf_is_port_vlan_ena(vf)) {
		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
	} else {
		err = ice_vsi_add_vlan_zero(vsi);
	}

	if (err) {
		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
			ice_vf_is_port_vlan_ena(vf) ?
			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
		return err;
	}

	err = vlan_ops->ena_rx_filtering(vsi);
	if (err)
		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
			 vf->vf_id, vsi->idx, err);

	return 0;
}

/**
 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
 * @vf: VF to re-apply the configuration for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * needs to re-apply the host configured Tx rate limiting configuration.
 */
static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int err;

	if (WARN_ON(!vsi))
		return -EINVAL;

	if (vf->min_tx_rate) {
		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
				vf->min_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	if (vf->max_tx_rate) {
		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
				vf->max_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	return 0;
}

/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
}

/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u8 broadcast[ETH_ALEN];
	int status;

	if (WARN_ON(!vsi))
		return -EINVAL;

	if (ice_is_eswitch_mode_switchdev(vf->pf))
		return 0;

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
			vf->vf_id, status);
		return status;
	}

	vf->num_mac++;

	if (is_valid_ether_addr(vf->hw_lan_addr)) {
		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
				&vf->hw_lan_addr[0], vf->vf_id,
				status);
			return status;
		}
		vf->num_mac++;

		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
	}

	return 0;
}

/**
 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
 * @vsi: Pointer to VSI
 *
 * This function moves VSI into corresponding scheduler aggregator node
 * based on cached value of "aggregator node info" per VSI
 */
static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;

	if (!vsi->agg_node)
		return;

	dev = ice_pf_to_dev(pf);
	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
		dev_dbg(dev,
			"agg_id %u already has reached max_num_vsis %u\n",
			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
		return;
	}

	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
				     vsi->idx, vsi->tc_cfg.ena_tc);
	if (status)
		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
			vsi->idx, vsi->agg_node->agg_id);
	else
		vsi->agg_node->num_vsis++;
}

/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 */
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vf_set_host_trust_cfg(vf);

	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
			vf->vf_id);

	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
			vf->vf_id);

	/* rebuild aggregator node config for main VF VSI */
	ice_vf_rebuild_aggregator_node_cfg(vsi);
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 * @vf: VF to set in initialized state
 *
 * After this function the VF will be ready to receive/handle the
 * VIRTCHNL_OP_GET_VF_RESOURCES message
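 *
 * A minimal sketch of the intended effect (illustrative only, not driver
 * code):
 *
 *	ice_vf_set_initialized(vf);
 *	// ICE_VF_STATE_INIT is now set, so ice_check_vf_init(vf) returns 0
 *	// and the PF will service virtchnl messages from this VF again.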
 */
static void ice_vf_set_initialized(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
}

/**
 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
 * @vf: the VF being reset
 *
 * Perform reset tasks which must occur after the VSI has been re-created or
 * rebuilt during a VF reset.
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
	ice_vf_rebuild_host_cfg(vf);
	ice_vf_set_initialized(vf);

	vf->vf_ops->post_vsi_rebuild(vf);
}

/**
 * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
 * are in unicast promiscuous mode
 * @pf: PF structure for accessing VF(s)
 *
 * Return false if no VF(s) are in unicast promiscuous mode,
 * else return true
 */
bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
{
	bool is_vf_promisc = false;
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		/* found a VF that has promiscuous mode configured */
		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
			is_vf_promisc = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_vf_promisc;
}

/**
 * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
 * @vf: the VF pointer
 * @vsi: the VSI to configure
 * @ucast_m: promiscuous mask to apply to unicast
 * @mcast_m: promiscuous mask to apply to multicast
 *
 * Decide which mask should be used for unicast and multicast filter,
 * based on presence of VLANs
 */
void
ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
			 u8 *ucast_m, u8 *mcast_m)
{
	if (ice_vf_is_port_vlan_ena(vf) ||
	    ice_vsi_has_non_zero_vlans(vsi)) {
		*mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
		*ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
	} else {
		*mcast_m = ICE_MCAST_PROMISC_BITS;
		*ucast_m = ICE_UCAST_PROMISC_BITS;
	}
}

/**
 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
 * @vf: the VF pointer
 * @vsi: the VSI to configure
 *
 * Clear all promiscuous/allmulticast filters for a VF
 */
static int
ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_pf *pf = vf->pf;
	u8 ucast_m, mcast_m;
	int ret = 0;

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
		if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
			if (ice_is_dflt_vsi_in_use(vsi->port_info))
				ret = ice_clear_dflt_vsi(vsi);
		} else {
			ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
		}

		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
		}
	}

	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
		}
	}
	return ret;
}

/**
 * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
 * @vf: the VF to configure
 * @vsi: the VF's VSI
 * @promisc_m: the promiscuous mode to enable
 */
int
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
	struct ice_hw *hw = &vsi->back->hw;
	int status;

	if (ice_vf_is_port_vlan_ena(vf))
		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
						  ice_vf_get_port_vlan_id(vf));
	else if (ice_vsi_has_non_zero_vlans(vsi))
		status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
	else
		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);

	if (status && status != -EEXIST) {
		dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
			vf->vf_id, status);
		return status;
	}

	return 0;
}

/**
 * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
 * @vf: the VF to configure
 * @vsi: the VF's VSI
 * @promisc_m: the promiscuous mode to disable
 */
int
ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
	struct ice_hw *hw = &vsi->back->hw;
	int status;

	if (ice_vf_is_port_vlan_ena(vf))
		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						    ice_vf_get_port_vlan_id(vf));
	else if (ice_vsi_has_non_zero_vlans(vsi))
		status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
	else
		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);

	if (status && status != -ENOENT) {
		dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
			vf->vf_id, status);
		return status;
	}

	return 0;
}

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 *
 * Reset all VFs at once, in response to a PF or other device reset.
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 */
void ice_reset_all_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);

	/* clear all malicious info if the VFs are getting reset */
	ice_for_each_vf(pf, bkt, vf)
		ice_mbx_clear_malvf(&vf->mbx_info);

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
		mutex_unlock(&pf->vfs.table_lock);
		return;
	}

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, bkt, vf)
		ice_trigger_vf_reset(vf, true, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Now that we've triggered all of the VFs, iterate
	 * the table again and wait for each VF to complete.
	 */
	ice_for_each_vf(pf, bkt, vf) {
		if (!vf->vf_ops->poll_reset_status(vf)) {
			/* Display a warning if at least one VF didn't manage
			 * to reset in time, but continue on with the
			 * operation.
			 */
			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
			break;
		}
	}

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		ice_vf_fdir_init(vf);
		/* clean VF control VSI when resetting VFs since it should be
		 * setup only when VF creates its first FDIR rule.
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);

		mutex_unlock(&vf->cfg_lock);
	}

	if (ice_is_eswitch_mode_switchdev(pf))
		if (ice_eswitch_rebuild(pf))
			dev_warn(dev, "eswitch rebuild failed\n");

	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 */
static void ice_notify_vf_reset(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	struct virtchnl_pf_event pfe;

	/* Bail out if VF is in disabled state, neither initialized, nor active
	 * state - otherwise proceed with notifications
	 */
	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
			      NULL);
}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @flags: flags controlling behavior of the reset
 *
 * Flags:
 * ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
 * ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
 * ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
 *
 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
 * the VF resets successfully. Returns an error code if the VF fails to
 * rebuild.
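 *
 * A minimal calling sketch (illustrative only; "vf" is assumed to be a
 * reference obtained via ice_get_vf_by_id() and released afterwards with
 * ice_put_vf()):
 *
 *	if (ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK))
 *		dev_dbg(ice_pf_to_dev(vf->pf), "VF %u reset failed\n",
 *			vf->vf_id);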
 */
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
	struct ice_pf *pf = vf->pf;
	struct ice_lag *lag;
	struct ice_vsi *vsi;
	u8 act_prt, pri_prt;
	struct device *dev;
	int err = 0;
	bool rsd;

	dev = ice_pf_to_dev(pf);
	act_prt = ICE_LAG_INVALID_PORT;
	pri_prt = pf->hw.port_info->lport;

	if (flags & ICE_VF_RESET_NOTIFY)
		ice_notify_vf_reset(vf);

	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return 0;
	}

	if (flags & ICE_VF_RESET_LOCK)
		mutex_lock(&vf->cfg_lock);
	else
		lockdep_assert_held(&vf->cfg_lock);

	lag = pf->lag;
	mutex_lock(&pf->lag_mutex);
	if (lag && lag->bonded && lag->primary) {
		act_prt = lag->active_port;
		if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
		    lag->upper_netdev)
			ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
		else
			act_prt = ICE_LAG_INVALID_PORT;
	}

	if (ice_is_vf_disabled(vf)) {
		vsi = ice_get_vf_vsi(vf);
		if (!vsi) {
			dev_dbg(dev, "VF is already removed\n");
			err = -EINVAL;
			goto out_unlock;
		}
		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);

		if (ice_vsi_is_rx_queue_active(vsi))
			ice_vsi_stop_all_rx_rings(vsi);

		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		goto out_unlock;
	}

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);

	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EIO;
		goto out_unlock;
	}

	ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	rsd = vf->vf_ops->poll_reset_status(vf);

	/* Display a warning if the VF didn't manage to reset in time, but
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	vf->driver_caps = 0;
	ice_vc_set_default_allowlist(vf);

	/* disable promiscuous modes in case they were enabled;
	 * ignore any error if the disabling process failed
	 */
	ice_vf_clear_all_promisc_modes(vf, vsi);

	ice_vf_fdir_exit(vf);
	ice_vf_fdir_init(vf);
	/* clean VF control VSI when resetting VF since it should be setup
	 * only when VF creates its first FDIR rule.
	 */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	ice_vf_pre_vsi_rebuild(vf);

	if (ice_vf_recreate_vsi(vf)) {
		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
			vf->vf_id);
		err = -EFAULT;
		goto out_unlock;
	}

	ice_vf_post_vsi_rebuild(vf);
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EINVAL;
		goto out_unlock;
	}

	ice_eswitch_update_repr(vsi);

	/* if the VF has been reset allow it to come up again */
	ice_mbx_clear_malvf(&vf->mbx_info);

out_unlock:
	if (lag && lag->bonded && lag->primary &&
	    act_prt != ICE_LAG_INVALID_PORT)
		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
	mutex_unlock(&pf->lag_mutex);

	if (flags & ICE_VF_RESET_LOCK)
		mutex_unlock(&vf->cfg_lock);

	return err;
}

/**
 * ice_set_vf_state_dis - Set VF state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_dis(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	vf->vf_ops->clear_reset_state(vf);
}

/* Private functions only accessed from other virtualization files */

/**
 * ice_initialize_vf_entry - Initialize a VF entry
 * @vf: pointer to the VF structure
 */
void ice_initialize_vf_entry(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vfs *vfs;

	vfs = &pf->vfs;

	/* assign default capabilities */
	vf->spoofchk = true;
	vf->num_vf_qs = vfs->num_qps_per;
	ice_vc_set_default_allowlist(vf);
	ice_virtchnl_set_dflt_ops(vf);

	/* ctrl_vsi_idx will be set to a valid value only when iAVF
	 * creates its first fdir rule.
	 */
	ice_vf_ctrl_invalidate_vsi(vf);
	ice_vf_fdir_init(vf);

	/* Initialize mailbox info for this VF */
	ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);

	mutex_init(&vf->cfg_lock);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}

/**
 * ice_err_to_virt_err - translate errors for VF return code
 * @err: error return code
 */
enum virtchnl_status_code ice_err_to_virt_err(int err)
{
	switch (err) {
	case 0:
		return VIRTCHNL_STATUS_SUCCESS;
	case -EINVAL:
	case -ENODEV:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case -ENOMEM:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case -EALREADY:
	case -EBUSY:
	case -EIO:
	case -ENOSPC:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}

/**
 * ice_check_vf_init - helper to check if VF init complete
 * @vf: the pointer to the VF to check
 */
int ice_check_vf_init(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
			vf->vf_id);
		return -EBUSY;
	}
	return 0;
}

/**
 * ice_vf_get_port_info - Get the VF's port info structure
 * @vf: VF used to get the port info structure for
 */
struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
{
	return vf->pf->hw.port_info;
}

/**
 * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
 * @vsi: the VSI to configure
 * @enable: whether to enable or disable the spoof checking
 *
 * Configure a VSI to enable (or disable) spoof checking behavior.
 */
static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
{
	struct ice_vsi_ctx *ctx;
	int err;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->info.sec_flags = vsi->info.sec_flags;
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);

	if (enable)
		ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
	else
		ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;

	err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
	if (err)
		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
			enable ? "ON" : "OFF", vsi->vsi_num, err);
	else
		vsi->info.sec_flags = ctx->info.sec_flags;

	kfree(ctx);

	return err;
}

/**
 * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
 * @vsi: VSI to enable Tx spoof checking for
 */
static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops;
	int err = 0;

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	/* Allow VF with VLAN 0 only to send all tagged traffic */
	if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
		err = vlan_ops->ena_tx_filtering(vsi);
		if (err)
			return err;
	}

	return ice_cfg_mac_antispoof(vsi, true);
}

/**
 * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
 * @vsi: VSI to disable Tx spoof checking for
 */
static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops;
	int err;

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	err = vlan_ops->dis_tx_filtering(vsi);
	if (err)
		return err;

	return ice_cfg_mac_antispoof(vsi, false);
}

/**
 * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
 * @vsi: VSI associated to the VF
 * @enable: whether to enable or disable the spoof checking
 */
int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
{
	int err;

	if (enable)
		err = ice_vsi_ena_spoofchk(vsi);
	else
		err = ice_vsi_dis_spoofchk(vsi);

	return err;
}

/**
 * ice_is_vf_trusted
 * @vf: pointer to the VF info
 */
bool ice_is_vf_trusted(struct ice_vf *vf)
{
	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}

/**
 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
 * @vf: the VF to check
 *
 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 * otherwise
 */
bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
{
	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
}
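
/*
 * Relationship between the queue-state helper above and the link-state helper
 * below (an illustrative sketch, not functional driver code):
 *
 *	if (ice_vf_has_no_qs_ena(vf))
 *		WARN_ON(ice_is_vf_link_up(vf));	// always false in this case,
 *						// regardless of vf->link_forced
 */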

/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 */
bool ice_is_vf_link_up(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);

	if (ice_check_vf_init(vf))
		return false;

	if (ice_vf_has_no_qs_ena(vf))
		return false;
	else if (vf->link_forced)
		return vf->link_up;
	else
		return pi->phy.link_info.link_info &
			ICE_AQ_LINK_UP;
}

/**
 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
 * @vf: VF that control VSI is being invalidated on
 */
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
{
	vf->ctrl_vsi_idx = ICE_NO_VSI;
}

/**
 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
 * @vf: VF that control VSI is being released on
 */
void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
	ice_vf_ctrl_invalidate_vsi(vf);
}

/**
 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
 * @vf: VF to setup control VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
{
	struct ice_vsi_cfg_params params = {};
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	params.type = ICE_VSI_CTRL;
	params.pi = ice_vf_get_port_info(vf);
	params.vf = vf;
	params.flags = ICE_VSI_FLAG_INIT;

	vsi = ice_vsi_setup(pf, &params);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
		ice_vf_ctrl_invalidate_vsi(vf);
	}

	return vsi;
}

/**
 * ice_vf_init_host_cfg - Initialize host admin configuration
 * @vf: VF to initialize
 * @vsi: the VSI created at initialization
 *
 * Initialize the VF host configuration. Called during VF creation to setup
 * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It
 * should only be called during VF creation.
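 *
 * A hedged sketch of the expected call site (illustrative only; the error
 * label is hypothetical and not part of this file):
 *
 *	err = ice_vf_init_host_cfg(vf, vsi);
 *	if (err)
 *		goto release_vsi;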
 */
int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_pf *pf = vf->pf;
	u8 broadcast[ETH_ALEN];
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	err = ice_vsi_add_vlan_zero(vsi);
	if (err) {
		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
			 vf->vf_id);
		return err;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err) {
		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
			 vf->vf_id);
		return err;
	}

	eth_broadcast_addr(broadcast);
	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (err) {
		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
			vf->vf_id, err);
		return err;
	}

	vf->num_mac = 1;

	err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
	if (err) {
		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
			 vf->vf_id);
		return err;
	}

	return 0;
}

/**
 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
 * @vf: VF to remove access to VSI for
 */
void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
	vf->lan_vsi_idx = ICE_NO_VSI;
	vf->lan_vsi_num = ICE_NO_VSI;
}

/**
 * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
 * @vf: pointer to the VF structure
 *
 * Release the VSI associated with this VF and then invalidate the VSI
 * indexes.
 */
void ice_vf_vsi_release(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vsi_release(vsi);
	ice_vf_invalidate_vsi(vf);
}

/**
 * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
 * @pf: the PF private structure
 * @vsi: pointer to the VSI
 *
 * Return the first VF control VSI found, other than the VSI passed by
 * parameter. This function is used to determine whether new resources have
 * to be allocated for a control VSI, or whether they can be shared with an
 * existing one.
 *
 * Return the found VF control VSI pointer, or NULL otherwise.
 */
struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
{
	struct ice_vsi *ctrl_vsi = NULL;
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
			ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
			break;
		}
	}

	rcu_read_unlock();
	return ctrl_vsi;
}
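
/*
 * Example of the reference-counted lookup pattern that the public API in this
 * file expects from callers (an illustrative sketch, not functional driver
 * code; the surrounding caller and its error handling are assumptions):
 *
 *	struct ice_vf *vf = ice_get_vf_by_id(pf, vf_id);
 *	int err;
 *
 *	if (!vf)
 *		return -EINVAL;
 *
 *	err = ice_check_vf_ready_for_cfg(vf);
 *	if (!err)
 *		err = do_the_configuration(vf);	// hypothetical helper
 *
 *	ice_put_vf(vf);
 *	return err;
 */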