1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Intel Corporation. */ 3 4 #include "ice.h" 5 #include "ice_vf_lib_private.h" 6 #include "ice_base.h" 7 #include "ice_lib.h" 8 #include "ice_fltr.h" 9 #include "ice_dcb_lib.h" 10 #include "ice_flow.h" 11 #include "ice_eswitch.h" 12 #include "ice_virtchnl_allowlist.h" 13 #include "ice_flex_pipe.h" 14 #include "ice_vf_vsi_vlan_ops.h" 15 #include "ice_vlan.h" 16 17 /** 18 * ice_free_vf_entries - Free all VF entries from the hash table 19 * @pf: pointer to the PF structure 20 * 21 * Iterate over the VF hash table, removing and releasing all VF entries. 22 * Called during VF teardown or as cleanup during failed VF initialization. 23 */ 24 static void ice_free_vf_entries(struct ice_pf *pf) 25 { 26 struct ice_vfs *vfs = &pf->vfs; 27 struct hlist_node *tmp; 28 struct ice_vf *vf; 29 unsigned int bkt; 30 31 /* Remove all VFs from the hash table and release their main 32 * reference. Once all references to the VF are dropped, ice_put_vf() 33 * will call ice_release_vf which will remove the VF memory. 34 */ 35 lockdep_assert_held(&vfs->table_lock); 36 37 hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) { 38 hash_del_rcu(&vf->entry); 39 ice_put_vf(vf); 40 } 41 } 42 43 /** 44 * ice_vf_vsi_release - invalidate the VF's VSI after freeing it 45 * @vf: invalidate this VF's VSI after freeing it 46 */ 47 static void ice_vf_vsi_release(struct ice_vf *vf) 48 { 49 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 50 51 if (WARN_ON(!vsi)) 52 return; 53 54 ice_vsi_release(vsi); 55 ice_vf_invalidate_vsi(vf); 56 } 57 58 /** 59 * ice_free_vf_res - Free a VF's resources 60 * @vf: pointer to the VF info 61 */ 62 static void ice_free_vf_res(struct ice_vf *vf) 63 { 64 struct ice_pf *pf = vf->pf; 65 int i, last_vector_idx; 66 67 /* First, disable VF's configuration API to prevent OS from 68 * accessing the VF's VSI after it's freed or invalidated. 69 */ 70 clear_bit(ICE_VF_STATE_INIT, vf->vf_states); 71 ice_vf_fdir_exit(vf); 72 /* free VF control VSI */ 73 if (vf->ctrl_vsi_idx != ICE_NO_VSI) 74 ice_vf_ctrl_vsi_release(vf); 75 76 /* free VSI and disconnect it from the parent uplink */ 77 if (vf->lan_vsi_idx != ICE_NO_VSI) { 78 ice_vf_vsi_release(vf); 79 vf->num_mac = 0; 80 } 81 82 last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1; 83 84 /* clear VF MDD event information */ 85 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); 86 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); 87 88 /* Disable interrupts so that VF starts in a known state */ 89 for (i = vf->first_vector_idx; i <= last_vector_idx; i++) { 90 wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M); 91 ice_flush(&pf->hw); 92 } 93 /* reset some of the state variables keeping track of the resources */ 94 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); 95 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); 96 } 97 98 /** 99 * ice_dis_vf_mappings 100 * @vf: pointer to the VF structure 101 */ 102 static void ice_dis_vf_mappings(struct ice_vf *vf) 103 { 104 struct ice_pf *pf = vf->pf; 105 struct ice_vsi *vsi; 106 struct device *dev; 107 int first, last, v; 108 struct ice_hw *hw; 109 110 hw = &pf->hw; 111 vsi = ice_get_vf_vsi(vf); 112 if (WARN_ON(!vsi)) 113 return; 114 115 dev = ice_pf_to_dev(pf); 116 wr32(hw, VPINT_ALLOC(vf->vf_id), 0); 117 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); 118 119 first = vf->first_vector_idx; 120 last = first + pf->vfs.num_msix_per - 1; 121 for (v = first; v <= last; v++) { 122 u32 reg; 123 124 reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) & 125 GLINT_VECT2FUNC_IS_PF_M) | 126 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & 127 GLINT_VECT2FUNC_PF_NUM_M)); 128 wr32(hw, GLINT_VECT2FUNC(v), reg); 129 } 130 131 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) 132 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0); 133 else 134 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n"); 135 136 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) 137 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0); 138 else 139 dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); 140 } 141 142 /** 143 * ice_sriov_free_msix_res - Reset/free any used MSIX resources 144 * @pf: pointer to the PF structure 145 * 146 * Since no MSIX entries are taken from the pf->irq_tracker then just clear 147 * the pf->sriov_base_vector. 148 * 149 * Returns 0 on success, and -EINVAL on error. 150 */ 151 static int ice_sriov_free_msix_res(struct ice_pf *pf) 152 { 153 struct ice_res_tracker *res; 154 155 if (!pf) 156 return -EINVAL; 157 158 res = pf->irq_tracker; 159 if (!res) 160 return -EINVAL; 161 162 /* give back irq_tracker resources used */ 163 WARN_ON(pf->sriov_base_vector < res->num_entries); 164 165 pf->sriov_base_vector = 0; 166 167 return 0; 168 } 169 170 /** 171 * ice_free_vfs - Free all VFs 172 * @pf: pointer to the PF structure 173 */ 174 void ice_free_vfs(struct ice_pf *pf) 175 { 176 struct device *dev = ice_pf_to_dev(pf); 177 struct ice_vfs *vfs = &pf->vfs; 178 struct ice_hw *hw = &pf->hw; 179 struct ice_vf *vf; 180 unsigned int bkt; 181 182 if (!ice_has_vfs(pf)) 183 return; 184 185 while (test_and_set_bit(ICE_VF_DIS, pf->state)) 186 usleep_range(1000, 2000); 187 188 /* Disable IOV before freeing resources. This lets any VF drivers 189 * running in the host get themselves cleaned up before we yank 190 * the carpet out from underneath their feet. 191 */ 192 if (!pci_vfs_assigned(pf->pdev)) 193 pci_disable_sriov(pf->pdev); 194 else 195 dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); 196 197 mutex_lock(&vfs->table_lock); 198 199 ice_eswitch_release(pf); 200 201 ice_for_each_vf(pf, bkt, vf) { 202 mutex_lock(&vf->cfg_lock); 203 204 ice_dis_vf_qs(vf); 205 206 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { 207 /* disable VF qp mappings and set VF disable state */ 208 ice_dis_vf_mappings(vf); 209 set_bit(ICE_VF_STATE_DIS, vf->vf_states); 210 ice_free_vf_res(vf); 211 } 212 213 if (!pci_vfs_assigned(pf->pdev)) { 214 u32 reg_idx, bit_idx; 215 216 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; 217 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; 218 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 219 } 220 221 /* clear malicious info since the VF is getting released */ 222 if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs, 223 ICE_MAX_SRIOV_VFS, vf->vf_id)) 224 dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", 225 vf->vf_id); 226 227 mutex_unlock(&vf->cfg_lock); 228 } 229 230 if (ice_sriov_free_msix_res(pf)) 231 dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); 232 233 vfs->num_qps_per = 0; 234 ice_free_vf_entries(pf); 235 236 mutex_unlock(&vfs->table_lock); 237 238 clear_bit(ICE_VF_DIS, pf->state); 239 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); 240 } 241 242 /** 243 * ice_vf_vsi_setup - Set up a VF VSI 244 * @vf: VF to setup VSI for 245 * 246 * Returns pointer to the successfully allocated VSI struct on success, 247 * otherwise returns NULL on failure. 248 */ 249 static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) 250 { 251 struct ice_port_info *pi = ice_vf_get_port_info(vf); 252 struct ice_pf *pf = vf->pf; 253 struct ice_vsi *vsi; 254 255 vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL); 256 257 if (!vsi) { 258 dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); 259 ice_vf_invalidate_vsi(vf); 260 return NULL; 261 } 262 263 vf->lan_vsi_idx = vsi->idx; 264 vf->lan_vsi_num = vsi->vsi_num; 265 266 return vsi; 267 } 268 269 /** 270 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space 271 * @pf: pointer to PF structure 272 * @vf: pointer to VF that the first MSIX vector index is being calculated for 273 * 274 * This returns the first MSIX vector index in PF space that is used by this VF. 275 * This index is used when accessing PF relative registers such as 276 * GLINT_VECT2FUNC and GLINT_DYN_CTL. 277 * This will always be the OICR index in the AVF driver so any functionality 278 * using vf->first_vector_idx for queue configuration will have to increment by 279 * 1 to avoid meddling with the OICR index. 280 */ 281 static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) 282 { 283 return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per; 284 } 285 286 /** 287 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware 288 * @vf: VF to enable MSIX mappings for 289 * 290 * Some of the registers need to be indexed/configured using hardware global 291 * device values and other registers need 0-based values, which represent PF 292 * based values. 293 */ 294 static void ice_ena_vf_msix_mappings(struct ice_vf *vf) 295 { 296 int device_based_first_msix, device_based_last_msix; 297 int pf_based_first_msix, pf_based_last_msix, v; 298 struct ice_pf *pf = vf->pf; 299 int device_based_vf_id; 300 struct ice_hw *hw; 301 u32 reg; 302 303 hw = &pf->hw; 304 pf_based_first_msix = vf->first_vector_idx; 305 pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1; 306 307 device_based_first_msix = pf_based_first_msix + 308 pf->hw.func_caps.common_cap.msix_vector_first_id; 309 device_based_last_msix = 310 (device_based_first_msix + pf->vfs.num_msix_per) - 1; 311 device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 312 313 reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) & 314 VPINT_ALLOC_FIRST_M) | 315 ((device_based_last_msix << VPINT_ALLOC_LAST_S) & 316 VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M); 317 wr32(hw, VPINT_ALLOC(vf->vf_id), reg); 318 319 reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S) 320 & VPINT_ALLOC_PCI_FIRST_M) | 321 ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) & 322 VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M); 323 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg); 324 325 /* map the interrupts to its functions */ 326 for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) { 327 reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) & 328 GLINT_VECT2FUNC_VF_NUM_M) | 329 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & 330 GLINT_VECT2FUNC_PF_NUM_M)); 331 wr32(hw, GLINT_VECT2FUNC(v), reg); 332 } 333 334 /* Map mailbox interrupt to VF MSI-X vector 0 */ 335 wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M); 336 } 337 338 /** 339 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF 340 * @vf: VF to enable the mappings for 341 * @max_txq: max Tx queues allowed on the VF's VSI 342 * @max_rxq: max Rx queues allowed on the VF's VSI 343 */ 344 static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq) 345 { 346 struct device *dev = ice_pf_to_dev(vf->pf); 347 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 348 struct ice_hw *hw = &vf->pf->hw; 349 u32 reg; 350 351 if (WARN_ON(!vsi)) 352 return; 353 354 /* set regardless of mapping mode */ 355 wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M); 356 357 /* VF Tx queues allocation */ 358 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) { 359 /* set the VF PF Tx queue range 360 * VFNUMQ value should be set to (number of queues - 1). A value 361 * of 0 means 1 queue and a value of 255 means 256 queues 362 */ 363 reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) & 364 VPLAN_TX_QBASE_VFFIRSTQ_M) | 365 (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) & 366 VPLAN_TX_QBASE_VFNUMQ_M)); 367 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg); 368 } else { 369 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n"); 370 } 371 372 /* set regardless of mapping mode */ 373 wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M); 374 375 /* VF Rx queues allocation */ 376 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) { 377 /* set the VF PF Rx queue range 378 * VFNUMQ value should be set to (number of queues - 1). A value 379 * of 0 means 1 queue and a value of 255 means 256 queues 380 */ 381 reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) & 382 VPLAN_RX_QBASE_VFFIRSTQ_M) | 383 (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) & 384 VPLAN_RX_QBASE_VFNUMQ_M)); 385 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg); 386 } else { 387 dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); 388 } 389 } 390 391 /** 392 * ice_ena_vf_mappings - enable VF MSIX and queue mapping 393 * @vf: pointer to the VF structure 394 */ 395 static void ice_ena_vf_mappings(struct ice_vf *vf) 396 { 397 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 398 399 if (WARN_ON(!vsi)) 400 return; 401 402 ice_ena_vf_msix_mappings(vf); 403 ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq); 404 } 405 406 /** 407 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space 408 * @vf: VF to calculate the register index for 409 * @q_vector: a q_vector associated to the VF 410 */ 411 int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) 412 { 413 struct ice_pf *pf; 414 415 if (!vf || !q_vector) 416 return -EINVAL; 417 418 pf = vf->pf; 419 420 /* always add one to account for the OICR being the first MSIX */ 421 return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id + 422 q_vector->v_idx + 1; 423 } 424 425 /** 426 * ice_get_max_valid_res_idx - Get the max valid resource index 427 * @res: pointer to the resource to find the max valid index for 428 * 429 * Start from the end of the ice_res_tracker and return right when we find the 430 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only 431 * valid for SR-IOV because it is the only consumer that manipulates the 432 * res->end and this is always called when res->end is set to res->num_entries. 433 */ 434 static int ice_get_max_valid_res_idx(struct ice_res_tracker *res) 435 { 436 int i; 437 438 if (!res) 439 return -EINVAL; 440 441 for (i = res->num_entries - 1; i >= 0; i--) 442 if (res->list[i] & ICE_RES_VALID_BIT) 443 return i; 444 445 return 0; 446 } 447 448 /** 449 * ice_sriov_set_msix_res - Set any used MSIX resources 450 * @pf: pointer to PF structure 451 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs 452 * 453 * This function allows SR-IOV resources to be taken from the end of the PF's 454 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We 455 * just set the pf->sriov_base_vector and return success. 456 * 457 * If there are not enough resources available, return an error. This should 458 * always be caught by ice_set_per_vf_res(). 459 * 460 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors 461 * in the PF's space available for SR-IOV. 462 */ 463 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) 464 { 465 u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; 466 int vectors_used = pf->irq_tracker->num_entries; 467 int sriov_base_vector; 468 469 sriov_base_vector = total_vectors - num_msix_needed; 470 471 /* make sure we only grab irq_tracker entries from the list end and 472 * that we have enough available MSIX vectors 473 */ 474 if (sriov_base_vector < vectors_used) 475 return -EINVAL; 476 477 pf->sriov_base_vector = sriov_base_vector; 478 479 return 0; 480 } 481 482 /** 483 * ice_set_per_vf_res - check if vectors and queues are available 484 * @pf: pointer to the PF structure 485 * @num_vfs: the number of SR-IOV VFs being configured 486 * 487 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we 488 * get more vectors and can enable more queues per VF. Note that this does not 489 * grab any vectors from the SW pool already allocated. Also note, that all 490 * vector counts include one for each VF's miscellaneous interrupt vector 491 * (i.e. OICR). 492 * 493 * Minimum VFs - 2 vectors, 1 queue pair 494 * Small VFs - 5 vectors, 4 queue pairs 495 * Medium VFs - 17 vectors, 16 queue pairs 496 * 497 * Second, determine number of queue pairs per VF by starting with a pre-defined 498 * maximum each VF supports. If this is not possible, then we adjust based on 499 * queue pairs available on the device. 500 * 501 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used 502 * by each VF during VF initialization and reset. 503 */ 504 static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) 505 { 506 int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); 507 u16 num_msix_per_vf, num_txq, num_rxq, avail_qs; 508 int msix_avail_per_vf, msix_avail_for_sriov; 509 struct device *dev = ice_pf_to_dev(pf); 510 int err; 511 512 lockdep_assert_held(&pf->vfs.table_lock); 513 514 if (!num_vfs) 515 return -EINVAL; 516 517 if (max_valid_res_idx < 0) 518 return -ENOSPC; 519 520 /* determine MSI-X resources per VF */ 521 msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors - 522 pf->irq_tracker->num_entries; 523 msix_avail_per_vf = msix_avail_for_sriov / num_vfs; 524 if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) { 525 num_msix_per_vf = ICE_NUM_VF_MSIX_MED; 526 } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) { 527 num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL; 528 } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) { 529 num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN; 530 } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) { 531 num_msix_per_vf = ICE_MIN_INTR_PER_VF; 532 } else { 533 dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n", 534 msix_avail_for_sriov, ICE_MIN_INTR_PER_VF, 535 num_vfs); 536 return -ENOSPC; 537 } 538 539 num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF, 540 ICE_MAX_RSS_QS_PER_VF); 541 avail_qs = ice_get_avail_txq_count(pf) / num_vfs; 542 if (!avail_qs) 543 num_txq = 0; 544 else if (num_txq > avail_qs) 545 num_txq = rounddown_pow_of_two(avail_qs); 546 547 num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF, 548 ICE_MAX_RSS_QS_PER_VF); 549 avail_qs = ice_get_avail_rxq_count(pf) / num_vfs; 550 if (!avail_qs) 551 num_rxq = 0; 552 else if (num_rxq > avail_qs) 553 num_rxq = rounddown_pow_of_two(avail_qs); 554 555 if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) { 556 dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n", 557 ICE_MIN_QS_PER_VF, num_vfs); 558 return -ENOSPC; 559 } 560 561 err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs); 562 if (err) { 563 dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n", 564 num_vfs, err); 565 return err; 566 } 567 568 /* only allow equal Tx/Rx queue count (i.e. queue pairs) */ 569 pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq); 570 pf->vfs.num_msix_per = num_msix_per_vf; 571 dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n", 572 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per); 573 574 return 0; 575 } 576 577 /** 578 * ice_init_vf_vsi_res - initialize/setup VF VSI resources 579 * @vf: VF to initialize/setup the VSI for 580 * 581 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the 582 * VF VSI's broadcast filter and is only used during initial VF creation. 583 */ 584 static int ice_init_vf_vsi_res(struct ice_vf *vf) 585 { 586 struct ice_vsi_vlan_ops *vlan_ops; 587 struct ice_pf *pf = vf->pf; 588 u8 broadcast[ETH_ALEN]; 589 struct ice_vsi *vsi; 590 struct device *dev; 591 int err; 592 593 vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); 594 595 dev = ice_pf_to_dev(pf); 596 vsi = ice_vf_vsi_setup(vf); 597 if (!vsi) 598 return -ENOMEM; 599 600 err = ice_vsi_add_vlan_zero(vsi); 601 if (err) { 602 dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n", 603 vf->vf_id); 604 goto release_vsi; 605 } 606 607 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 608 err = vlan_ops->ena_rx_filtering(vsi); 609 if (err) { 610 dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n", 611 vf->vf_id); 612 goto release_vsi; 613 } 614 615 eth_broadcast_addr(broadcast); 616 err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); 617 if (err) { 618 dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n", 619 vf->vf_id, err); 620 goto release_vsi; 621 } 622 623 err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk); 624 if (err) { 625 dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n", 626 vf->vf_id); 627 goto release_vsi; 628 } 629 630 vf->num_mac = 1; 631 632 return 0; 633 634 release_vsi: 635 ice_vf_vsi_release(vf); 636 return err; 637 } 638 639 /** 640 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV 641 * @pf: PF the VFs are associated with 642 */ 643 static int ice_start_vfs(struct ice_pf *pf) 644 { 645 struct ice_hw *hw = &pf->hw; 646 unsigned int bkt, it_cnt; 647 struct ice_vf *vf; 648 int retval; 649 650 lockdep_assert_held(&pf->vfs.table_lock); 651 652 it_cnt = 0; 653 ice_for_each_vf(pf, bkt, vf) { 654 vf->vf_ops->clear_reset_trigger(vf); 655 656 retval = ice_init_vf_vsi_res(vf); 657 if (retval) { 658 dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n", 659 vf->vf_id, retval); 660 goto teardown; 661 } 662 663 set_bit(ICE_VF_STATE_INIT, vf->vf_states); 664 ice_ena_vf_mappings(vf); 665 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); 666 it_cnt++; 667 } 668 669 ice_flush(hw); 670 return 0; 671 672 teardown: 673 ice_for_each_vf(pf, bkt, vf) { 674 if (it_cnt == 0) 675 break; 676 677 ice_dis_vf_mappings(vf); 678 ice_vf_vsi_release(vf); 679 it_cnt--; 680 } 681 682 return retval; 683 } 684 685 /** 686 * ice_sriov_free_vf - Free VF memory after all references are dropped 687 * @vf: pointer to VF to free 688 * 689 * Called by ice_put_vf through ice_release_vf once the last reference to a VF 690 * structure has been dropped. 691 */ 692 static void ice_sriov_free_vf(struct ice_vf *vf) 693 { 694 mutex_destroy(&vf->cfg_lock); 695 696 kfree_rcu(vf, rcu); 697 } 698 699 /** 700 * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers 701 * @vf: the vf to configure 702 */ 703 static void ice_sriov_clear_mbx_register(struct ice_vf *vf) 704 { 705 struct ice_pf *pf = vf->pf; 706 707 wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0); 708 wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0); 709 } 710 711 /** 712 * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF 713 * @vf: pointer to VF structure 714 * @is_vflr: true if reset occurred due to VFLR 715 * 716 * Trigger and cleanup after a VF reset for a SR-IOV VF. 717 */ 718 static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr) 719 { 720 struct ice_pf *pf = vf->pf; 721 u32 reg, reg_idx, bit_idx; 722 unsigned int vf_abs_id, i; 723 struct device *dev; 724 struct ice_hw *hw; 725 726 dev = ice_pf_to_dev(pf); 727 hw = &pf->hw; 728 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; 729 730 /* In the case of a VFLR, HW has already reset the VF and we just need 731 * to clean up. Otherwise we must first trigger the reset using the 732 * VFRTRIG register. 733 */ 734 if (!is_vflr) { 735 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); 736 reg |= VPGEN_VFRTRIG_VFSWR_M; 737 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); 738 } 739 740 /* clear the VFLR bit in GLGEN_VFLRSTAT */ 741 reg_idx = (vf_abs_id) / 32; 742 bit_idx = (vf_abs_id) % 32; 743 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 744 ice_flush(hw); 745 746 wr32(hw, PF_PCI_CIAA, 747 VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S)); 748 for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) { 749 reg = rd32(hw, PF_PCI_CIAD); 750 /* no transactions pending so stop polling */ 751 if ((reg & VF_TRANS_PENDING_M) == 0) 752 break; 753 754 dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id); 755 udelay(ICE_PCI_CIAD_WAIT_DELAY_US); 756 } 757 } 758 759 /** 760 * ice_sriov_poll_reset_status - poll SRIOV VF reset status 761 * @vf: pointer to VF structure 762 * 763 * Returns true when reset is successful, else returns false 764 */ 765 static bool ice_sriov_poll_reset_status(struct ice_vf *vf) 766 { 767 struct ice_pf *pf = vf->pf; 768 unsigned int i; 769 u32 reg; 770 771 for (i = 0; i < 10; i++) { 772 /* VF reset requires driver to first reset the VF and then 773 * poll the status register to make sure that the reset 774 * completed successfully. 775 */ 776 reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id)); 777 if (reg & VPGEN_VFRSTAT_VFRD_M) 778 return true; 779 780 /* only sleep if the reset is not done */ 781 usleep_range(10, 20); 782 } 783 return false; 784 } 785 786 /** 787 * ice_sriov_clear_reset_trigger - enable VF to access hardware 788 * @vf: VF to enabled hardware access for 789 */ 790 static void ice_sriov_clear_reset_trigger(struct ice_vf *vf) 791 { 792 struct ice_hw *hw = &vf->pf->hw; 793 u32 reg; 794 795 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); 796 reg &= ~VPGEN_VFRTRIG_VFSWR_M; 797 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); 798 ice_flush(hw); 799 } 800 801 /** 802 * ice_sriov_vsi_rebuild - release and rebuild VF's VSI 803 * @vf: VF to release and setup the VSI for 804 * 805 * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF 806 * configuration change, etc.). 807 */ 808 static int ice_sriov_vsi_rebuild(struct ice_vf *vf) 809 { 810 struct ice_pf *pf = vf->pf; 811 812 ice_vf_vsi_release(vf); 813 if (!ice_vf_vsi_setup(vf)) { 814 dev_err(ice_pf_to_dev(pf), 815 "Failed to release and setup the VF%u's VSI\n", 816 vf->vf_id); 817 return -ENOMEM; 818 } 819 820 return 0; 821 } 822 823 /** 824 * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt 825 * @vf: VF to perform tasks on 826 */ 827 static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf) 828 { 829 ice_vf_rebuild_host_cfg(vf); 830 ice_vf_set_initialized(vf); 831 ice_ena_vf_mappings(vf); 832 wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); 833 } 834 835 static const struct ice_vf_ops ice_sriov_vf_ops = { 836 .reset_type = ICE_VF_RESET, 837 .free = ice_sriov_free_vf, 838 .clear_mbx_register = ice_sriov_clear_mbx_register, 839 .trigger_reset_register = ice_sriov_trigger_reset_register, 840 .poll_reset_status = ice_sriov_poll_reset_status, 841 .clear_reset_trigger = ice_sriov_clear_reset_trigger, 842 .vsi_rebuild = ice_sriov_vsi_rebuild, 843 .post_vsi_rebuild = ice_sriov_post_vsi_rebuild, 844 }; 845 846 /** 847 * ice_create_vf_entries - Allocate and insert VF entries 848 * @pf: pointer to the PF structure 849 * @num_vfs: the number of VFs to allocate 850 * 851 * Allocate new VF entries and insert them into the hash table. Set some 852 * basic default fields for initializing the new VFs. 853 * 854 * After this function exits, the hash table will have num_vfs entries 855 * inserted. 856 * 857 * Returns 0 on success or an integer error code on failure. 858 */ 859 static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) 860 { 861 struct ice_vfs *vfs = &pf->vfs; 862 struct ice_vf *vf; 863 u16 vf_id; 864 int err; 865 866 lockdep_assert_held(&vfs->table_lock); 867 868 for (vf_id = 0; vf_id < num_vfs; vf_id++) { 869 vf = kzalloc(sizeof(*vf), GFP_KERNEL); 870 if (!vf) { 871 err = -ENOMEM; 872 goto err_free_entries; 873 } 874 kref_init(&vf->refcnt); 875 876 vf->pf = pf; 877 vf->vf_id = vf_id; 878 879 /* set sriov vf ops for VFs created during SRIOV flow */ 880 vf->vf_ops = &ice_sriov_vf_ops; 881 882 vf->vf_sw_id = pf->first_sw; 883 /* assign default capabilities */ 884 vf->spoofchk = true; 885 vf->num_vf_qs = pf->vfs.num_qps_per; 886 ice_vc_set_default_allowlist(vf); 887 888 /* ctrl_vsi_idx will be set to a valid value only when VF 889 * creates its first fdir rule. 890 */ 891 ice_vf_ctrl_invalidate_vsi(vf); 892 ice_vf_fdir_init(vf); 893 894 ice_virtchnl_set_dflt_ops(vf); 895 896 mutex_init(&vf->cfg_lock); 897 898 hash_add_rcu(vfs->table, &vf->entry, vf_id); 899 } 900 901 return 0; 902 903 err_free_entries: 904 ice_free_vf_entries(pf); 905 return err; 906 } 907 908 /** 909 * ice_ena_vfs - enable VFs so they are ready to be used 910 * @pf: pointer to the PF structure 911 * @num_vfs: number of VFs to enable 912 */ 913 static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) 914 { 915 struct device *dev = ice_pf_to_dev(pf); 916 struct ice_hw *hw = &pf->hw; 917 int ret; 918 919 /* Disable global interrupt 0 so we don't try to handle the VFLR. */ 920 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), 921 ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); 922 set_bit(ICE_OICR_INTR_DIS, pf->state); 923 ice_flush(hw); 924 925 ret = pci_enable_sriov(pf->pdev, num_vfs); 926 if (ret) 927 goto err_unroll_intr; 928 929 mutex_lock(&pf->vfs.table_lock); 930 931 ret = ice_set_per_vf_res(pf, num_vfs); 932 if (ret) { 933 dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer number of VFs\n", 934 num_vfs, ret); 935 goto err_unroll_sriov; 936 } 937 938 ret = ice_create_vf_entries(pf, num_vfs); 939 if (ret) { 940 dev_err(dev, "Failed to allocate VF entries for %d VFs\n", 941 num_vfs); 942 goto err_unroll_sriov; 943 } 944 945 ret = ice_start_vfs(pf); 946 if (ret) { 947 dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret); 948 ret = -EAGAIN; 949 goto err_unroll_vf_entries; 950 } 951 952 clear_bit(ICE_VF_DIS, pf->state); 953 954 ret = ice_eswitch_configure(pf); 955 if (ret) { 956 dev_err(dev, "Failed to configure eswitch, err %d\n", ret); 957 goto err_unroll_sriov; 958 } 959 960 /* rearm global interrupts */ 961 if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) 962 ice_irq_dynamic_ena(hw, NULL, NULL); 963 964 mutex_unlock(&pf->vfs.table_lock); 965 966 return 0; 967 968 err_unroll_vf_entries: 969 ice_free_vf_entries(pf); 970 err_unroll_sriov: 971 mutex_unlock(&pf->vfs.table_lock); 972 pci_disable_sriov(pf->pdev); 973 err_unroll_intr: 974 /* rearm interrupts here */ 975 ice_irq_dynamic_ena(hw, NULL, NULL); 976 clear_bit(ICE_OICR_INTR_DIS, pf->state); 977 return ret; 978 } 979 980 /** 981 * ice_pci_sriov_ena - Enable or change number of VFs 982 * @pf: pointer to the PF structure 983 * @num_vfs: number of VFs to allocate 984 * 985 * Returns 0 on success and negative on failure 986 */ 987 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) 988 { 989 int pre_existing_vfs = pci_num_vf(pf->pdev); 990 struct device *dev = ice_pf_to_dev(pf); 991 int err; 992 993 if (pre_existing_vfs && pre_existing_vfs != num_vfs) 994 ice_free_vfs(pf); 995 else if (pre_existing_vfs && pre_existing_vfs == num_vfs) 996 return 0; 997 998 if (num_vfs > pf->vfs.num_supported) { 999 dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n", 1000 num_vfs, pf->vfs.num_supported); 1001 return -EOPNOTSUPP; 1002 } 1003 1004 dev_info(dev, "Enabling %d VFs\n", num_vfs); 1005 err = ice_ena_vfs(pf, num_vfs); 1006 if (err) { 1007 dev_err(dev, "Failed to enable SR-IOV: %d\n", err); 1008 return err; 1009 } 1010 1011 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags); 1012 return 0; 1013 } 1014 1015 /** 1016 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks 1017 * @pf: PF to enabled SR-IOV on 1018 */ 1019 static int ice_check_sriov_allowed(struct ice_pf *pf) 1020 { 1021 struct device *dev = ice_pf_to_dev(pf); 1022 1023 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { 1024 dev_err(dev, "This device is not capable of SR-IOV\n"); 1025 return -EOPNOTSUPP; 1026 } 1027 1028 if (ice_is_safe_mode(pf)) { 1029 dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n"); 1030 return -EOPNOTSUPP; 1031 } 1032 1033 if (!ice_pf_state_is_nominal(pf)) { 1034 dev_err(dev, "Cannot enable SR-IOV, device not ready\n"); 1035 return -EBUSY; 1036 } 1037 1038 return 0; 1039 } 1040 1041 /** 1042 * ice_sriov_configure - Enable or change number of VFs via sysfs 1043 * @pdev: pointer to a pci_dev structure 1044 * @num_vfs: number of VFs to allocate or 0 to free VFs 1045 * 1046 * This function is called when the user updates the number of VFs in sysfs. On 1047 * success return whatever num_vfs was set to by the caller. Return negative on 1048 * failure. 1049 */ 1050 int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) 1051 { 1052 struct ice_pf *pf = pci_get_drvdata(pdev); 1053 struct device *dev = ice_pf_to_dev(pf); 1054 int err; 1055 1056 err = ice_check_sriov_allowed(pf); 1057 if (err) 1058 return err; 1059 1060 if (!num_vfs) { 1061 if (!pci_vfs_assigned(pdev)) { 1062 ice_free_vfs(pf); 1063 ice_mbx_deinit_snapshot(&pf->hw); 1064 if (pf->lag) 1065 ice_enable_lag(pf->lag); 1066 return 0; 1067 } 1068 1069 dev_err(dev, "can't free VFs because some are assigned to VMs.\n"); 1070 return -EBUSY; 1071 } 1072 1073 err = ice_mbx_init_snapshot(&pf->hw, num_vfs); 1074 if (err) 1075 return err; 1076 1077 err = ice_pci_sriov_ena(pf, num_vfs); 1078 if (err) { 1079 ice_mbx_deinit_snapshot(&pf->hw); 1080 return err; 1081 } 1082 1083 if (pf->lag) 1084 ice_disable_lag(pf->lag); 1085 return num_vfs; 1086 } 1087 1088 /** 1089 * ice_process_vflr_event - Free VF resources via IRQ calls 1090 * @pf: pointer to the PF structure 1091 * 1092 * called from the VFLR IRQ handler to 1093 * free up VF resources and state variables 1094 */ 1095 void ice_process_vflr_event(struct ice_pf *pf) 1096 { 1097 struct ice_hw *hw = &pf->hw; 1098 struct ice_vf *vf; 1099 unsigned int bkt; 1100 u32 reg; 1101 1102 if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) || 1103 !ice_has_vfs(pf)) 1104 return; 1105 1106 mutex_lock(&pf->vfs.table_lock); 1107 ice_for_each_vf(pf, bkt, vf) { 1108 u32 reg_idx, bit_idx; 1109 1110 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; 1111 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; 1112 /* read GLGEN_VFLRSTAT register to find out the flr VFs */ 1113 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); 1114 if (reg & BIT(bit_idx)) 1115 /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */ 1116 ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK); 1117 } 1118 mutex_unlock(&pf->vfs.table_lock); 1119 } 1120 1121 /** 1122 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in 1123 * @pf: PF used to index all VFs 1124 * @pfq: queue index relative to the PF's function space 1125 * 1126 * If no VF is found who owns the pfq then return NULL, otherwise return a 1127 * pointer to the VF who owns the pfq 1128 * 1129 * If this function returns non-NULL, it acquires a reference count of the VF 1130 * structure. The caller is responsible for calling ice_put_vf() to drop this 1131 * reference. 1132 */ 1133 static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq) 1134 { 1135 struct ice_vf *vf; 1136 unsigned int bkt; 1137 1138 rcu_read_lock(); 1139 ice_for_each_vf_rcu(pf, bkt, vf) { 1140 struct ice_vsi *vsi; 1141 u16 rxq_idx; 1142 1143 vsi = ice_get_vf_vsi(vf); 1144 if (!vsi) 1145 continue; 1146 1147 ice_for_each_rxq(vsi, rxq_idx) 1148 if (vsi->rxq_map[rxq_idx] == pfq) { 1149 struct ice_vf *found; 1150 1151 if (kref_get_unless_zero(&vf->refcnt)) 1152 found = vf; 1153 else 1154 found = NULL; 1155 rcu_read_unlock(); 1156 return found; 1157 } 1158 } 1159 rcu_read_unlock(); 1160 1161 return NULL; 1162 } 1163 1164 /** 1165 * ice_globalq_to_pfq - convert from global queue index to PF space queue index 1166 * @pf: PF used for conversion 1167 * @globalq: global queue index used to convert to PF space queue index 1168 */ 1169 static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq) 1170 { 1171 return globalq - pf->hw.func_caps.common_cap.rxq_first_id; 1172 } 1173 1174 /** 1175 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF 1176 * @pf: PF that the LAN overflow event happened on 1177 * @event: structure holding the event information for the LAN overflow event 1178 * 1179 * Determine if the LAN overflow event was caused by a VF queue. If it was not 1180 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a 1181 * reset on the offending VF. 1182 */ 1183 void 1184 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) 1185 { 1186 u32 gldcb_rtctq, queue; 1187 struct ice_vf *vf; 1188 1189 gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq); 1190 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq); 1191 1192 /* event returns device global Rx queue number */ 1193 queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >> 1194 GLDCB_RTCTQ_RXQNUM_S; 1195 1196 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue)); 1197 if (!vf) 1198 return; 1199 1200 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK); 1201 ice_put_vf(vf); 1202 } 1203 1204 /** 1205 * ice_set_vf_spoofchk 1206 * @netdev: network interface device structure 1207 * @vf_id: VF identifier 1208 * @ena: flag to enable or disable feature 1209 * 1210 * Enable or disable VF spoof checking 1211 */ 1212 int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) 1213 { 1214 struct ice_netdev_priv *np = netdev_priv(netdev); 1215 struct ice_pf *pf = np->vsi->back; 1216 struct ice_vsi *vf_vsi; 1217 struct device *dev; 1218 struct ice_vf *vf; 1219 int ret; 1220 1221 dev = ice_pf_to_dev(pf); 1222 1223 vf = ice_get_vf_by_id(pf, vf_id); 1224 if (!vf) 1225 return -EINVAL; 1226 1227 ret = ice_check_vf_ready_for_cfg(vf); 1228 if (ret) 1229 goto out_put_vf; 1230 1231 vf_vsi = ice_get_vf_vsi(vf); 1232 if (!vf_vsi) { 1233 netdev_err(netdev, "VSI %d for VF %d is null\n", 1234 vf->lan_vsi_idx, vf->vf_id); 1235 ret = -EINVAL; 1236 goto out_put_vf; 1237 } 1238 1239 if (vf_vsi->type != ICE_VSI_VF) { 1240 netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n", 1241 vf_vsi->type, vf_vsi->vsi_num, vf->vf_id); 1242 ret = -ENODEV; 1243 goto out_put_vf; 1244 } 1245 1246 if (ena == vf->spoofchk) { 1247 dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF"); 1248 ret = 0; 1249 goto out_put_vf; 1250 } 1251 1252 ret = ice_vsi_apply_spoofchk(vf_vsi, ena); 1253 if (ret) 1254 dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d\n error %d\n", 1255 ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret); 1256 else 1257 vf->spoofchk = ena; 1258 1259 out_put_vf: 1260 ice_put_vf(vf); 1261 return ret; 1262 } 1263 1264 /** 1265 * ice_get_vf_cfg 1266 * @netdev: network interface device structure 1267 * @vf_id: VF identifier 1268 * @ivi: VF configuration structure 1269 * 1270 * return VF configuration 1271 */ 1272 int 1273 ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) 1274 { 1275 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1276 struct ice_vf *vf; 1277 int ret; 1278 1279 vf = ice_get_vf_by_id(pf, vf_id); 1280 if (!vf) 1281 return -EINVAL; 1282 1283 ret = ice_check_vf_ready_for_cfg(vf); 1284 if (ret) 1285 goto out_put_vf; 1286 1287 ivi->vf = vf_id; 1288 ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr); 1289 1290 /* VF configuration for VLAN and applicable QoS */ 1291 ivi->vlan = ice_vf_get_port_vlan_id(vf); 1292 ivi->qos = ice_vf_get_port_vlan_prio(vf); 1293 if (ice_vf_is_port_vlan_ena(vf)) 1294 ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf)); 1295 1296 ivi->trusted = vf->trusted; 1297 ivi->spoofchk = vf->spoofchk; 1298 if (!vf->link_forced) 1299 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 1300 else if (vf->link_up) 1301 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 1302 else 1303 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 1304 ivi->max_tx_rate = vf->max_tx_rate; 1305 ivi->min_tx_rate = vf->min_tx_rate; 1306 1307 out_put_vf: 1308 ice_put_vf(vf); 1309 return ret; 1310 } 1311 1312 /** 1313 * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch 1314 * @pf: PF used to reference the switch's rules 1315 * @umac: unicast MAC to compare against existing switch rules 1316 * 1317 * Return true on the first/any match, else return false 1318 */ 1319 static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac) 1320 { 1321 struct ice_sw_recipe *mac_recipe_list = 1322 &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC]; 1323 struct ice_fltr_mgmt_list_entry *list_itr; 1324 struct list_head *rule_head; 1325 struct mutex *rule_lock; /* protect MAC filter list access */ 1326 1327 rule_head = &mac_recipe_list->filt_rules; 1328 rule_lock = &mac_recipe_list->filt_rule_lock; 1329 1330 mutex_lock(rule_lock); 1331 list_for_each_entry(list_itr, rule_head, list_entry) { 1332 u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0]; 1333 1334 if (ether_addr_equal(existing_mac, umac)) { 1335 mutex_unlock(rule_lock); 1336 return true; 1337 } 1338 } 1339 1340 mutex_unlock(rule_lock); 1341 1342 return false; 1343 } 1344 1345 /** 1346 * ice_set_vf_mac 1347 * @netdev: network interface device structure 1348 * @vf_id: VF identifier 1349 * @mac: MAC address 1350 * 1351 * program VF MAC address 1352 */ 1353 int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 1354 { 1355 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1356 struct ice_vf *vf; 1357 int ret; 1358 1359 if (is_multicast_ether_addr(mac)) { 1360 netdev_err(netdev, "%pM not a valid unicast address\n", mac); 1361 return -EINVAL; 1362 } 1363 1364 vf = ice_get_vf_by_id(pf, vf_id); 1365 if (!vf) 1366 return -EINVAL; 1367 1368 /* nothing left to do, unicast MAC already set */ 1369 if (ether_addr_equal(vf->dev_lan_addr.addr, mac) && 1370 ether_addr_equal(vf->hw_lan_addr.addr, mac)) { 1371 ret = 0; 1372 goto out_put_vf; 1373 } 1374 1375 ret = ice_check_vf_ready_for_cfg(vf); 1376 if (ret) 1377 goto out_put_vf; 1378 1379 if (ice_unicast_mac_exists(pf, mac)) { 1380 netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n", 1381 mac, vf_id, mac); 1382 ret = -EINVAL; 1383 goto out_put_vf; 1384 } 1385 1386 mutex_lock(&vf->cfg_lock); 1387 1388 /* VF is notified of its new MAC via the PF's response to the 1389 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset 1390 */ 1391 ether_addr_copy(vf->dev_lan_addr.addr, mac); 1392 ether_addr_copy(vf->hw_lan_addr.addr, mac); 1393 if (is_zero_ether_addr(mac)) { 1394 /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */ 1395 vf->pf_set_mac = false; 1396 netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n", 1397 vf->vf_id); 1398 } else { 1399 /* PF will add MAC rule for the VF */ 1400 vf->pf_set_mac = true; 1401 netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n", 1402 mac, vf_id); 1403 } 1404 1405 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); 1406 mutex_unlock(&vf->cfg_lock); 1407 1408 out_put_vf: 1409 ice_put_vf(vf); 1410 return ret; 1411 } 1412 1413 /** 1414 * ice_set_vf_trust 1415 * @netdev: network interface device structure 1416 * @vf_id: VF identifier 1417 * @trusted: Boolean value to enable/disable trusted VF 1418 * 1419 * Enable or disable a given VF as trusted 1420 */ 1421 int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) 1422 { 1423 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1424 struct ice_vf *vf; 1425 int ret; 1426 1427 if (ice_is_eswitch_mode_switchdev(pf)) { 1428 dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n"); 1429 return -EOPNOTSUPP; 1430 } 1431 1432 vf = ice_get_vf_by_id(pf, vf_id); 1433 if (!vf) 1434 return -EINVAL; 1435 1436 ret = ice_check_vf_ready_for_cfg(vf); 1437 if (ret) 1438 goto out_put_vf; 1439 1440 /* Check if already trusted */ 1441 if (trusted == vf->trusted) { 1442 ret = 0; 1443 goto out_put_vf; 1444 } 1445 1446 mutex_lock(&vf->cfg_lock); 1447 1448 vf->trusted = trusted; 1449 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); 1450 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n", 1451 vf_id, trusted ? "" : "un"); 1452 1453 mutex_unlock(&vf->cfg_lock); 1454 1455 out_put_vf: 1456 ice_put_vf(vf); 1457 return ret; 1458 } 1459 1460 /** 1461 * ice_set_vf_link_state 1462 * @netdev: network interface device structure 1463 * @vf_id: VF identifier 1464 * @link_state: required link state 1465 * 1466 * Set VF's link state, irrespective of physical link state status 1467 */ 1468 int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) 1469 { 1470 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1471 struct ice_vf *vf; 1472 int ret; 1473 1474 vf = ice_get_vf_by_id(pf, vf_id); 1475 if (!vf) 1476 return -EINVAL; 1477 1478 ret = ice_check_vf_ready_for_cfg(vf); 1479 if (ret) 1480 goto out_put_vf; 1481 1482 switch (link_state) { 1483 case IFLA_VF_LINK_STATE_AUTO: 1484 vf->link_forced = false; 1485 break; 1486 case IFLA_VF_LINK_STATE_ENABLE: 1487 vf->link_forced = true; 1488 vf->link_up = true; 1489 break; 1490 case IFLA_VF_LINK_STATE_DISABLE: 1491 vf->link_forced = true; 1492 vf->link_up = false; 1493 break; 1494 default: 1495 ret = -EINVAL; 1496 goto out_put_vf; 1497 } 1498 1499 ice_vc_notify_vf_link_state(vf); 1500 1501 out_put_vf: 1502 ice_put_vf(vf); 1503 return ret; 1504 } 1505 1506 /** 1507 * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs 1508 * @pf: PF associated with VFs 1509 */ 1510 static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf) 1511 { 1512 struct ice_vf *vf; 1513 unsigned int bkt; 1514 int rate = 0; 1515 1516 rcu_read_lock(); 1517 ice_for_each_vf_rcu(pf, bkt, vf) 1518 rate += vf->min_tx_rate; 1519 rcu_read_unlock(); 1520 1521 return rate; 1522 } 1523 1524 /** 1525 * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription 1526 * @vf: VF trying to configure min_tx_rate 1527 * @min_tx_rate: min Tx rate in Mbps 1528 * 1529 * Check if the min_tx_rate being passed in will cause oversubscription of total 1530 * min_tx_rate based on the current link speed and all other VFs configured 1531 * min_tx_rate 1532 * 1533 * Return true if the passed min_tx_rate would cause oversubscription, else 1534 * return false 1535 */ 1536 static bool 1537 ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate) 1538 { 1539 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 1540 int all_vfs_min_tx_rate; 1541 int link_speed_mbps; 1542 1543 if (WARN_ON(!vsi)) 1544 return false; 1545 1546 link_speed_mbps = ice_get_link_speed_mbps(vsi); 1547 all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf); 1548 1549 /* this VF's previous rate is being overwritten */ 1550 all_vfs_min_tx_rate -= vf->min_tx_rate; 1551 1552 if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) { 1553 dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n", 1554 min_tx_rate, vf->vf_id, 1555 all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps, 1556 link_speed_mbps); 1557 return true; 1558 } 1559 1560 return false; 1561 } 1562 1563 /** 1564 * ice_set_vf_bw - set min/max VF bandwidth 1565 * @netdev: network interface device structure 1566 * @vf_id: VF identifier 1567 * @min_tx_rate: Minimum Tx rate in Mbps 1568 * @max_tx_rate: Maximum Tx rate in Mbps 1569 */ 1570 int 1571 ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, 1572 int max_tx_rate) 1573 { 1574 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1575 struct ice_vsi *vsi; 1576 struct device *dev; 1577 struct ice_vf *vf; 1578 int ret; 1579 1580 dev = ice_pf_to_dev(pf); 1581 1582 vf = ice_get_vf_by_id(pf, vf_id); 1583 if (!vf) 1584 return -EINVAL; 1585 1586 ret = ice_check_vf_ready_for_cfg(vf); 1587 if (ret) 1588 goto out_put_vf; 1589 1590 vsi = ice_get_vf_vsi(vf); 1591 if (!vsi) { 1592 ret = -EINVAL; 1593 goto out_put_vf; 1594 } 1595 1596 /* when max_tx_rate is zero that means no max Tx rate limiting, so only 1597 * check if max_tx_rate is non-zero 1598 */ 1599 if (max_tx_rate && min_tx_rate > max_tx_rate) { 1600 dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n", 1601 min_tx_rate, max_tx_rate); 1602 ret = -EINVAL; 1603 goto out_put_vf; 1604 } 1605 1606 if (min_tx_rate && ice_is_dcb_active(pf)) { 1607 dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n"); 1608 ret = -EOPNOTSUPP; 1609 goto out_put_vf; 1610 } 1611 1612 if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) { 1613 ret = -EINVAL; 1614 goto out_put_vf; 1615 } 1616 1617 if (vf->min_tx_rate != (unsigned int)min_tx_rate) { 1618 ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000); 1619 if (ret) { 1620 dev_err(dev, "Unable to set min-tx-rate for VF %d\n", 1621 vf->vf_id); 1622 goto out_put_vf; 1623 } 1624 1625 vf->min_tx_rate = min_tx_rate; 1626 } 1627 1628 if (vf->max_tx_rate != (unsigned int)max_tx_rate) { 1629 ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000); 1630 if (ret) { 1631 dev_err(dev, "Unable to set max-tx-rate for VF %d\n", 1632 vf->vf_id); 1633 goto out_put_vf; 1634 } 1635 1636 vf->max_tx_rate = max_tx_rate; 1637 } 1638 1639 out_put_vf: 1640 ice_put_vf(vf); 1641 return ret; 1642 } 1643 1644 /** 1645 * ice_get_vf_stats - populate some stats for the VF 1646 * @netdev: the netdev of the PF 1647 * @vf_id: the host OS identifier (0-255) 1648 * @vf_stats: pointer to the OS memory to be initialized 1649 */ 1650 int ice_get_vf_stats(struct net_device *netdev, int vf_id, 1651 struct ifla_vf_stats *vf_stats) 1652 { 1653 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1654 struct ice_eth_stats *stats; 1655 struct ice_vsi *vsi; 1656 struct ice_vf *vf; 1657 int ret; 1658 1659 vf = ice_get_vf_by_id(pf, vf_id); 1660 if (!vf) 1661 return -EINVAL; 1662 1663 ret = ice_check_vf_ready_for_cfg(vf); 1664 if (ret) 1665 goto out_put_vf; 1666 1667 vsi = ice_get_vf_vsi(vf); 1668 if (!vsi) { 1669 ret = -EINVAL; 1670 goto out_put_vf; 1671 } 1672 1673 ice_update_eth_stats(vsi); 1674 stats = &vsi->eth_stats; 1675 1676 memset(vf_stats, 0, sizeof(*vf_stats)); 1677 1678 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + 1679 stats->rx_multicast; 1680 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + 1681 stats->tx_multicast; 1682 vf_stats->rx_bytes = stats->rx_bytes; 1683 vf_stats->tx_bytes = stats->tx_bytes; 1684 vf_stats->broadcast = stats->rx_broadcast; 1685 vf_stats->multicast = stats->rx_multicast; 1686 vf_stats->rx_dropped = stats->rx_discards; 1687 vf_stats->tx_dropped = stats->tx_discards; 1688 1689 out_put_vf: 1690 ice_put_vf(vf); 1691 return ret; 1692 } 1693 1694 /** 1695 * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported 1696 * @hw: hardware structure used to check the VLAN mode 1697 * @vlan_proto: VLAN TPID being checked 1698 * 1699 * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q 1700 * and ETH_P_8021AD are supported. If the device is configured in Single VLAN 1701 * Mode (SVM), then only ETH_P_8021Q is supported. 1702 */ 1703 static bool 1704 ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto) 1705 { 1706 bool is_supported = false; 1707 1708 switch (vlan_proto) { 1709 case ETH_P_8021Q: 1710 is_supported = true; 1711 break; 1712 case ETH_P_8021AD: 1713 if (ice_is_dvm_ena(hw)) 1714 is_supported = true; 1715 break; 1716 } 1717 1718 return is_supported; 1719 } 1720 1721 /** 1722 * ice_set_vf_port_vlan 1723 * @netdev: network interface device structure 1724 * @vf_id: VF identifier 1725 * @vlan_id: VLAN ID being set 1726 * @qos: priority setting 1727 * @vlan_proto: VLAN protocol 1728 * 1729 * program VF Port VLAN ID and/or QoS 1730 */ 1731 int 1732 ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, 1733 __be16 vlan_proto) 1734 { 1735 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1736 u16 local_vlan_proto = ntohs(vlan_proto); 1737 struct device *dev; 1738 struct ice_vf *vf; 1739 int ret; 1740 1741 dev = ice_pf_to_dev(pf); 1742 1743 if (vlan_id >= VLAN_N_VID || qos > 7) { 1744 dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n", 1745 vf_id, vlan_id, qos); 1746 return -EINVAL; 1747 } 1748 1749 if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) { 1750 dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n", 1751 local_vlan_proto); 1752 return -EPROTONOSUPPORT; 1753 } 1754 1755 vf = ice_get_vf_by_id(pf, vf_id); 1756 if (!vf) 1757 return -EINVAL; 1758 1759 ret = ice_check_vf_ready_for_cfg(vf); 1760 if (ret) 1761 goto out_put_vf; 1762 1763 if (ice_vf_get_port_vlan_prio(vf) == qos && 1764 ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto && 1765 ice_vf_get_port_vlan_id(vf) == vlan_id) { 1766 /* duplicate request, so just return success */ 1767 dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n", 1768 vlan_id, qos, local_vlan_proto); 1769 ret = 0; 1770 goto out_put_vf; 1771 } 1772 1773 mutex_lock(&vf->cfg_lock); 1774 1775 vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos); 1776 if (ice_vf_is_port_vlan_ena(vf)) 1777 dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n", 1778 vlan_id, qos, local_vlan_proto, vf_id); 1779 else 1780 dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id); 1781 1782 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); 1783 mutex_unlock(&vf->cfg_lock); 1784 1785 out_put_vf: 1786 ice_put_vf(vf); 1787 return ret; 1788 } 1789 1790 /** 1791 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event 1792 * @vf: pointer to the VF structure 1793 */ 1794 void ice_print_vf_rx_mdd_event(struct ice_vf *vf) 1795 { 1796 struct ice_pf *pf = vf->pf; 1797 struct device *dev; 1798 1799 dev = ice_pf_to_dev(pf); 1800 1801 dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n", 1802 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id, 1803 vf->dev_lan_addr.addr, 1804 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) 1805 ? "on" : "off"); 1806 } 1807 1808 /** 1809 * ice_print_vfs_mdd_events - print VFs malicious driver detect event 1810 * @pf: pointer to the PF structure 1811 * 1812 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events. 1813 */ 1814 void ice_print_vfs_mdd_events(struct ice_pf *pf) 1815 { 1816 struct device *dev = ice_pf_to_dev(pf); 1817 struct ice_hw *hw = &pf->hw; 1818 struct ice_vf *vf; 1819 unsigned int bkt; 1820 1821 /* check that there are pending MDD events to print */ 1822 if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state)) 1823 return; 1824 1825 /* VF MDD event logs are rate limited to one second intervals */ 1826 if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1)) 1827 return; 1828 1829 pf->vfs.last_printed_mdd_jiffies = jiffies; 1830 1831 mutex_lock(&pf->vfs.table_lock); 1832 ice_for_each_vf(pf, bkt, vf) { 1833 /* only print Rx MDD event message if there are new events */ 1834 if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) { 1835 vf->mdd_rx_events.last_printed = 1836 vf->mdd_rx_events.count; 1837 ice_print_vf_rx_mdd_event(vf); 1838 } 1839 1840 /* only print Tx MDD event message if there are new events */ 1841 if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) { 1842 vf->mdd_tx_events.last_printed = 1843 vf->mdd_tx_events.count; 1844 1845 dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n", 1846 vf->mdd_tx_events.count, hw->pf_id, vf->vf_id, 1847 vf->dev_lan_addr.addr); 1848 } 1849 } 1850 mutex_unlock(&pf->vfs.table_lock); 1851 } 1852 1853 /** 1854 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR 1855 * @pdev: pointer to a pci_dev structure 1856 * 1857 * Called when recovering from a PF FLR to restore interrupt capability to 1858 * the VFs. 1859 */ 1860 void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) 1861 { 1862 u16 vf_id; 1863 int pos; 1864 1865 if (!pci_num_vf(pdev)) 1866 return; 1867 1868 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 1869 if (pos) { 1870 struct pci_dev *vfdev; 1871 1872 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, 1873 &vf_id); 1874 vfdev = pci_get_device(pdev->vendor, vf_id, NULL); 1875 while (vfdev) { 1876 if (vfdev->is_virtfn && vfdev->physfn == pdev) 1877 pci_restore_msi_state(vfdev); 1878 vfdev = pci_get_device(pdev->vendor, vf_id, 1879 vfdev); 1880 } 1881 } 1882 } 1883 1884 /** 1885 * ice_is_malicious_vf - helper function to detect a malicious VF 1886 * @pf: ptr to struct ice_pf 1887 * @event: pointer to the AQ event 1888 * @num_msg_proc: the number of messages processed so far 1889 * @num_msg_pending: the number of messages peinding in admin queue 1890 */ 1891 bool 1892 ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, 1893 u16 num_msg_proc, u16 num_msg_pending) 1894 { 1895 s16 vf_id = le16_to_cpu(event->desc.retval); 1896 struct device *dev = ice_pf_to_dev(pf); 1897 struct ice_mbx_data mbxdata; 1898 bool malvf = false; 1899 struct ice_vf *vf; 1900 int status; 1901 1902 vf = ice_get_vf_by_id(pf, vf_id); 1903 if (!vf) 1904 return false; 1905 1906 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) 1907 goto out_put_vf; 1908 1909 mbxdata.num_msg_proc = num_msg_proc; 1910 mbxdata.num_pending_arq = num_msg_pending; 1911 mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries; 1912 #define ICE_MBX_OVERFLOW_WATERMARK 64 1913 mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK; 1914 1915 /* check to see if we have a malicious VF */ 1916 status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf); 1917 if (status) 1918 goto out_put_vf; 1919 1920 if (malvf) { 1921 bool report_vf = false; 1922 1923 /* if the VF is malicious and we haven't let the user 1924 * know about it, then let them know now 1925 */ 1926 status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs, 1927 ICE_MAX_SRIOV_VFS, vf_id, 1928 &report_vf); 1929 if (status) 1930 dev_dbg(dev, "Error reporting malicious VF\n"); 1931 1932 if (report_vf) { 1933 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); 1934 1935 if (pf_vsi) 1936 dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n", 1937 &vf->dev_lan_addr.addr[0], 1938 pf_vsi->netdev->dev_addr); 1939 } 1940 } 1941 1942 out_put_vf: 1943 ice_put_vf(vf); 1944 return malvf; 1945 } 1946