// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_vf_lib_private.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"

/**
 * ice_free_vf_entries - Free all VF entries from the hash table
 * @pf: pointer to the PF structure
 *
 * Iterate over the VF hash table, removing and releasing all VF entries.
 * Called during VF teardown or as cleanup during failed VF initialization.
 */
static void ice_free_vf_entries(struct ice_pf *pf)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct hlist_node *tmp;
	struct ice_vf *vf;
	unsigned int bkt;

	/* Remove all VFs from the hash table and release their main
	 * reference. Once all references to the VF are dropped, ice_put_vf()
	 * will call ice_release_vf which will remove the VF memory.
	 */
	lockdep_assert_held(&vfs->table_lock);

	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
		hash_del_rcu(&vf->entry);
		ice_put_vf(vf);
	}
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
	ice_vf_fdir_exit(vf);
	/* free VF control VSI */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings - disable VF MSI-X and queue mappings in hardware
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi))
		return;

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->vfs.num_msix_per - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
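
/* A worked example of the disable path above, with hypothetical values: if
 * num_msix_per is 17 and a VF's first_vector_idx is 1946, the loop in
 * ice_dis_vf_mappings() rewrites GLINT_VECT2FUNC(1946)..GLINT_VECT2FUNC(1962)
 * with IS_PF set and the PF's number, handing those vectors back to the PF.
 */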
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	struct ice_res_tracker *res;

	if (!pf)
		return -EINVAL;

	res = pf->irq_tracker;
	if (!res)
		return -EINVAL;

	/* give back irq_tracker resources used */
	WARN_ON(pf->sriov_base_vector < res->num_entries);

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	if (!ice_has_vfs(pf))
		return;

	while (test_and_set_bit(ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	mutex_lock(&vfs->table_lock);

	ice_eswitch_release(pf);

	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_dis_vf_qs(vf);

		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(vf);
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			ice_free_vf_res(vf);
		}

		if (!pci_vfs_assigned(pf->pdev)) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}

		/* clear malicious info since the VF is getting released */
		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
					ICE_MAX_SRIOV_VFS, vf->vf_id))
			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
				vf->vf_id);

		mutex_unlock(&vf->cfg_lock);
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	vfs->num_qps_per = 0;
	ice_free_vf_entries(pf);

	mutex_unlock(&vfs->table_lock);

	clear_bit(ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}
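
/* Teardown above is deliberately ordered: SR-IOV is disabled first so VF
 * drivers can detach cleanly, then per-VF queues, mappings, and resources
 * are freed under each VF's cfg_lock, and the hash-table entries are
 * released last, all while holding table_lock.
 */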

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
	struct ice_vsi_cfg_params params = {};
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	params.type = ICE_VSI_VF;
	params.pi = ice_vf_get_port_info(vf);
	params.vf = vf;
	params.flags = ICE_VSI_FLAG_INIT;

	vsi = ice_vsi_setup(pf, &params);

	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
		ice_vf_invalidate_vsi(vf);
		return NULL;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	return vsi;
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this VF.
 * This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
	return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->vfs.num_msix_per) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		& VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to this VF's function */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}
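
/* Indexing example for the mappings above, with hypothetical values: if
 * msix_vector_first_id is 1 and a VF's PF-based vectors are 1946..1962,
 * VPINT_ALLOC/VPINT_ALLOC_PCI are programmed with device-based indices
 * 1947..1963, while GLINT_VECT2FUNC is still indexed per PF-based vector.
 */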

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	if (WARN_ON(!vsi))
		return;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to account for the OICR being the first MSIX */
	return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
	       q_vector->v_idx + 1;
}
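
/* Register-index arithmetic above, worked with hypothetical values: with
 * sriov_base_vector = 1912 and num_msix_per = 17, VF 2's q_vector 0 maps to
 * 1912 + 17 * 2 + 0 + 1 = 1947; the "+ 1" skips the OICR vector, which is
 * always the VF's first MSI-X vector.
 */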

/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return as soon as we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 * valid for SR-IOV because it is the only consumer that manipulates the
 * res->end and this is always called when res->end is set to res->num_entries.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
	int i;

	if (!res)
		return -EINVAL;

	for (i = res->num_entries - 1; i >= 0; i--)
		if (res->list[i] & ICE_RES_VALID_BIT)
			return i;

	return 0;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = pf->irq_tracker->num_entries;
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}
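
/* Carving example for ice_sriov_set_msix_res(), with hypothetical values:
 * with 2048 total vectors, 96 irq_tracker entries in use, and 8 VFs needing
 * 17 vectors each (136 total), the base becomes 2048 - 136 = 1912, which is
 * >= 96, so the SR-IOV block fits at the end without touching the tracker.
 */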

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 * @num_vfs: the number of SR-IOV VFs being configured
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 * get more vectors and can enable more queues per VF. Note that this does not
 * grab any vectors from the SW pool already allocated. Also note, that all
 * vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so they can be
 * used by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	lockdep_assert_held(&pf->vfs.table_lock);

	if (!num_vfs)
		return -EINVAL;

	if (max_valid_res_idx < 0)
		return -ENOSPC;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		pf->irq_tracker->num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			num_vfs);
		return -ENOSPC;
	}

	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
	if (!avail_qs)
		num_txq = 0;
	else if (num_txq > avail_qs)
		num_txq = rounddown_pow_of_two(avail_qs);

	num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
	if (!avail_qs)
		num_rxq = 0;
	else if (num_rxq > avail_qs)
		num_rxq = rounddown_pow_of_two(avail_qs);

	if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, num_vfs);
		return -ENOSPC;
	}

	err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
	if (err) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
			num_vfs, err);
		return err;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
	pf->vfs.num_msix_per = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);

	return 0;
}
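
/* Sizing example for ice_set_per_vf_res(), with hypothetical values: with
 * 96 vectors left for SR-IOV and 8 VFs, each VF could get 12 vectors, which
 * lands in the "small" bucket from the comment above (5 vectors), leaving
 * 4 data-queue vectors per VF and therefore up to 4 queue pairs.
 */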

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the
 * VF VSI's broadcast filter and is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int err;

	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	err = ice_vf_init_host_cfg(vf, vsi);
	if (err)
		goto release_vsi;

	return 0;

release_vsi:
	ice_vf_vsi_release(vf);
	return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int bkt, it_cnt;
	struct ice_vf *vf;
	int retval;

	lockdep_assert_held(&pf->vfs.table_lock);

	it_cnt = 0;
	ice_for_each_vf(pf, bkt, vf) {
		vf->vf_ops->clear_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
		it_cnt++;
	}

	ice_flush(hw);
	return 0;

teardown:
	ice_for_each_vf(pf, bkt, vf) {
		if (it_cnt == 0)
			break;

		ice_dis_vf_mappings(vf);
		ice_vf_vsi_release(vf);
		it_cnt--;
	}

	return retval;
}

/**
 * ice_sriov_free_vf - Free VF memory after all references are dropped
 * @vf: pointer to VF to free
 *
 * Called by ice_put_vf through ice_release_vf once the last reference to a VF
 * structure has been dropped.
 */
static void ice_sriov_free_vf(struct ice_vf *vf)
{
	mutex_destroy(&vf->cfg_lock);

	kfree_rcu(vf, rcu);
}

/**
 * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
 * @vf: the VF to configure
 */
static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
	wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
}

/**
 * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF
 * @vf: pointer to VF structure
 * @is_vflr: true if reset occurred due to VFLR
 *
 * Trigger and cleanup after a VF reset for a SR-IOV VF.
 */
static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* In the case of a VFLR, HW has already reset the VF and we just need
	 * to clean up. Otherwise we must first trigger the reset using the
	 * VFRTRIG register.
	 */
	if (!is_vflr) {
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}

	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}
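
/* The reset ops in this file are expected to run in sequence from the
 * common VF reset logic: trigger_reset_register() asserts the reset (unless
 * a VFLR already did), poll_reset_status() below waits for VPGEN_VFRSTAT to
 * report completion, and clear_reset_trigger() re-enables hardware access.
 */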

/**
 * ice_sriov_poll_reset_status - poll SRIOV VF reset status
 * @vf: pointer to VF structure
 *
 * Returns true when reset is successful, else returns false
 */
static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	unsigned int i;
	u32 reg;

	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M)
			return true;

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}
	return false;
}

/**
 * ice_sriov_clear_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

/**
 * ice_sriov_create_vsi - Create a new VSI for a VF
 * @vf: VF to create the VSI for
 *
 * This is called by ice_vf_recreate_vsi to create the new VSI after the old
 * VSI has been released.
 */
static int ice_sriov_create_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi;

	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	return 0;
}

/**
 * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
{
	ice_ena_vf_mappings(vf);
	wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

static const struct ice_vf_ops ice_sriov_vf_ops = {
	.reset_type = ICE_VF_RESET,
	.free = ice_sriov_free_vf,
	.clear_mbx_register = ice_sriov_clear_mbx_register,
	.trigger_reset_register = ice_sriov_trigger_reset_register,
	.poll_reset_status = ice_sriov_poll_reset_status,
	.clear_reset_trigger = ice_sriov_clear_reset_trigger,
	.create_vsi = ice_sriov_create_vsi,
	.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};

/**
 * ice_create_vf_entries - Allocate and insert VF entries
 * @pf: pointer to the PF structure
 * @num_vfs: the number of VFs to allocate
 *
 * Allocate new VF entries and insert them into the hash table. Set some
 * basic default fields for initializing the new VFs.
 *
 * After this function exits, the hash table will have num_vfs entries
 * inserted.
 *
 * Returns 0 on success or an integer error code on failure.
 */
static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_vf *vf;
	u16 vf_id;
	int err;

	lockdep_assert_held(&vfs->table_lock);

	for (vf_id = 0; vf_id < num_vfs; vf_id++) {
		vf = kzalloc(sizeof(*vf), GFP_KERNEL);
		if (!vf) {
			err = -ENOMEM;
			goto err_free_entries;
		}
		kref_init(&vf->refcnt);

		vf->pf = pf;
		vf->vf_id = vf_id;

		/* set sriov vf ops for VFs created during SRIOV flow */
		vf->vf_ops = &ice_sriov_vf_ops;

		ice_initialize_vf_entry(vf);

		vf->vf_sw_id = pf->first_sw;

		hash_add_rcu(vfs->table, &vf->entry, vf_id);
	}

	return 0;

err_free_entries:
	ice_free_vf_entries(pf);
	return err;
}
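
/* Reference counting note for the entries created above: kref_init() gives
 * each VF one reference, which is owned by the hash table itself. Lookups
 * such as ice_get_vf_by_id() take an extra reference that callers drop with
 * ice_put_vf(); the table's own reference is dropped in
 * ice_free_vf_entries(), and the memory is freed via ice_sriov_free_vf().
 */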

/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 */
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret)
		goto err_unroll_intr;

	mutex_lock(&pf->vfs.table_lock);

	ret = ice_set_per_vf_res(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer VFs\n",
			num_vfs, ret);
		goto err_unroll_sriov;
	}

	ret = ice_create_vf_entries(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
			num_vfs);
		goto err_unroll_sriov;
	}

	ret = ice_start_vfs(pf);
	if (ret) {
		dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
		ret = -EAGAIN;
		goto err_unroll_vf_entries;
	}

	clear_bit(ICE_VF_DIS, pf->state);

	ret = ice_eswitch_configure(pf);
	if (ret) {
		dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
		goto err_unroll_sriov;
	}

	/* rearm global interrupts */
	if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	mutex_unlock(&pf->vfs.table_lock);

	return 0;

err_unroll_vf_entries:
	ice_free_vf_entries(pf);
err_unroll_sriov:
	mutex_unlock(&pf->vfs.table_lock);
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		ice_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return 0;

	if (num_vfs > pf->vfs.num_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->vfs.num_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Enabling %d VFs\n", num_vfs);
	err = ice_ena_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return 0;
}

/**
 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 * @pf: PF to enable SR-IOV on
 */
static int ice_check_sriov_allowed(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate or 0 to free VFs
 *
 * This function is called when the user updates the number of VFs in sysfs. On
 * success return whatever num_vfs was set to by the caller. Return negative on
 * failure.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_check_sriov_allowed(pf);
	if (err)
		return err;

	if (!num_vfs) {
		if (!pci_vfs_assigned(pdev)) {
			ice_free_vfs(pf);
			ice_mbx_deinit_snapshot(&pf->hw);
			if (pf->lag)
				ice_enable_lag(pf->lag);
			return 0;
		}

		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
	if (err)
		return err;

	err = ice_pci_sriov_ena(pf, num_vfs);
	if (err) {
		ice_mbx_deinit_snapshot(&pf->hw);
		return err;
	}

	if (pf->lag)
		ice_disable_lag(pf->lag);
	return num_vfs;
}
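
/* Usage sketch for the sysfs entry point above (the PCI address is a
 * hypothetical example): writing a VF count to sriov_numvfs invokes
 * ice_sriov_configure() through the PCI core, e.g.
 *
 *   echo 4 > /sys/bus/pci/devices/0000:3b:00.0/sriov_numvfs
 *
 * and writing 0 frees the VFs again, provided none are assigned to VMs.
 */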

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * Called from the VFLR IRQ handler to free up VF resources and state
 * variables
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		u32 reg_idx, bit_idx;

		reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
 * @pf: PF used to index all VFs
 * @pfq: queue index relative to the PF's function space
 *
 * If no VF is found who owns the pfq then return NULL, otherwise return a
 * pointer to the VF who owns the pfq
 *
 * If this function returns non-NULL, it acquires a reference count of the VF
 * structure. The caller is responsible for calling ice_put_vf() to drop this
 * reference.
 */
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		struct ice_vsi *vsi;
		u16 rxq_idx;

		vsi = ice_get_vf_vsi(vf);
		if (!vsi)
			continue;

		ice_for_each_rxq(vsi, rxq_idx)
			if (vsi->rxq_map[rxq_idx] == pfq) {
				struct ice_vf *found;

				if (kref_get_unless_zero(&vf->refcnt))
					found = vf;
				else
					found = NULL;
				rcu_read_unlock();
				return found;
			}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
 * @pf: PF used for conversion
 * @globalq: global queue index used to convert to PF space queue index
 */
static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
{
	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
}

/**
 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
 * @pf: PF that the LAN overflow event happened on
 * @event: structure holding the event information for the LAN overflow event
 *
 * Determine if the LAN overflow event was caused by a VF queue. If it was not
 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
 * reset on the offending VF.
 */
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 gldcb_rtctq, queue;
	struct ice_vf *vf;

	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);

	/* event returns device global Rx queue number */
	queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
		GLDCB_RTCTQ_RXQNUM_S;

	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
	if (!vf)
		return;

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
	ice_put_vf(vf);
}

/**
 * ice_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ena: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 */
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		netdev_err(netdev, "VSI %d for VF %d is null\n",
			   vf->lan_vsi_idx, vf->vf_id);
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf_vsi->type != ICE_VSI_VF) {
		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
		ret = -ENODEV;
		goto out_put_vf;
	}

	if (ena == vf->spoofchk) {
		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_vsi_apply_spoofchk(vf_vsi, ena);
	if (ret)
		dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d, error %d\n",
			ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
	else
		vf->spoofchk = ena;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}
"ON" : "OFF"); 1188 ret = 0; 1189 goto out_put_vf; 1190 } 1191 1192 ret = ice_vsi_apply_spoofchk(vf_vsi, ena); 1193 if (ret) 1194 dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d\n error %d\n", 1195 ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret); 1196 else 1197 vf->spoofchk = ena; 1198 1199 out_put_vf: 1200 ice_put_vf(vf); 1201 return ret; 1202 } 1203 1204 /** 1205 * ice_get_vf_cfg 1206 * @netdev: network interface device structure 1207 * @vf_id: VF identifier 1208 * @ivi: VF configuration structure 1209 * 1210 * return VF configuration 1211 */ 1212 int 1213 ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) 1214 { 1215 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1216 struct ice_vf *vf; 1217 int ret; 1218 1219 vf = ice_get_vf_by_id(pf, vf_id); 1220 if (!vf) 1221 return -EINVAL; 1222 1223 ret = ice_check_vf_ready_for_cfg(vf); 1224 if (ret) 1225 goto out_put_vf; 1226 1227 ivi->vf = vf_id; 1228 ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr); 1229 1230 /* VF configuration for VLAN and applicable QoS */ 1231 ivi->vlan = ice_vf_get_port_vlan_id(vf); 1232 ivi->qos = ice_vf_get_port_vlan_prio(vf); 1233 if (ice_vf_is_port_vlan_ena(vf)) 1234 ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf)); 1235 1236 ivi->trusted = vf->trusted; 1237 ivi->spoofchk = vf->spoofchk; 1238 if (!vf->link_forced) 1239 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 1240 else if (vf->link_up) 1241 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 1242 else 1243 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 1244 ivi->max_tx_rate = vf->max_tx_rate; 1245 ivi->min_tx_rate = vf->min_tx_rate; 1246 1247 out_put_vf: 1248 ice_put_vf(vf); 1249 return ret; 1250 } 1251 1252 /** 1253 * ice_set_vf_mac 1254 * @netdev: network interface device structure 1255 * @vf_id: VF identifier 1256 * @mac: MAC address 1257 * 1258 * program VF MAC address 1259 */ 1260 int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 1261 { 1262 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1263 struct ice_vf *vf; 1264 int ret; 1265 1266 if (is_multicast_ether_addr(mac)) { 1267 netdev_err(netdev, "%pM not a valid unicast address\n", mac); 1268 return -EINVAL; 1269 } 1270 1271 vf = ice_get_vf_by_id(pf, vf_id); 1272 if (!vf) 1273 return -EINVAL; 1274 1275 /* nothing left to do, unicast MAC already set */ 1276 if (ether_addr_equal(vf->dev_lan_addr.addr, mac) && 1277 ether_addr_equal(vf->hw_lan_addr.addr, mac)) { 1278 ret = 0; 1279 goto out_put_vf; 1280 } 1281 1282 ret = ice_check_vf_ready_for_cfg(vf); 1283 if (ret) 1284 goto out_put_vf; 1285 1286 mutex_lock(&vf->cfg_lock); 1287 1288 /* VF is notified of its new MAC via the PF's response to the 1289 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset 1290 */ 1291 ether_addr_copy(vf->dev_lan_addr.addr, mac); 1292 ether_addr_copy(vf->hw_lan_addr.addr, mac); 1293 if (is_zero_ether_addr(mac)) { 1294 /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */ 1295 vf->pf_set_mac = false; 1296 netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n", 1297 vf->vf_id); 1298 } else { 1299 /* PF will add MAC rule for the VF */ 1300 vf->pf_set_mac = true; 1301 netdev_info(netdev, "Setting MAC %pM on VF %d. 

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_trust
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 *
 * Enable or disable a given VF as trusted
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_is_eswitch_mode_switchdev(pf)) {
		dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
		return -EOPNOTSUPP;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	/* Check if already trusted */
	if (trusted == vf->trusted) {
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->trusted = trusted;
	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set VF's link state, irrespective of physical link state status
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		break;
	default:
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_vc_notify_vf_link_state(vf);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
 * @pf: PF associated with VFs
 */
static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	int rate = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		rate += vf->min_tx_rate;
	rcu_read_unlock();

	return rate;
}

/**
 * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
 * @vf: VF trying to configure min_tx_rate
 * @min_tx_rate: min Tx rate in Mbps
 *
 * Check if the min_tx_rate being passed in will cause oversubscription of total
 * min_tx_rate based on the current link speed and all other VFs' configured
 * min_tx_rate
 *
 * Return true if the passed min_tx_rate would cause oversubscription, else
 * return false
 */
static bool
ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int all_vfs_min_tx_rate;
	int link_speed_mbps;

	if (WARN_ON(!vsi))
		return false;

	link_speed_mbps = ice_get_link_speed_mbps(vsi);
	all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);

	/* this VF's previous rate is being overwritten */
	all_vfs_min_tx_rate -= vf->min_tx_rate;

	if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
		dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
			min_tx_rate, vf->vf_id,
			all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
			link_speed_mbps);
		return true;
	}

	return false;
}
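
/* Oversubscription arithmetic above, worked with hypothetical values: on a
 * 25000 Mbps link where the other VFs' min rates already sum to 20000 Mbps,
 * a request of 6000 Mbps fails because 20000 + 6000 - 25000 = 1000 Mbps of
 * guaranteed bandwidth could not be honored.
 */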

/**
 * ice_set_vf_bw - set min/max VF bandwidth
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate in Mbps
 * @max_tx_rate: Maximum Tx rate in Mbps
 */
int
ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
	      int max_tx_rate)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (min_tx_rate && ice_is_dcb_active(pf)) {
		dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
		ret = -EOPNOTSUPP;
		goto out_put_vf;
	}

	if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
		ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->min_tx_rate = min_tx_rate;
	}

	if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
		ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->max_tx_rate = max_tx_rate;
	}

out_put_vf:
	ice_put_vf(vf);
	return ret;
}
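
/* Usage sketch for ice_set_vf_bw() (the interface name is a hypothetical
 * example); rates are given in Mbps and converted to Kbps internally via
 * the "* 1000" above:
 *
 *   ip link set eth0 vf 0 min_tx_rate 1000 max_tx_rate 5000
 */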

/**
 * ice_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-255)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int ice_get_vf_stats(struct net_device *netdev, int vf_id,
		     struct ifla_vf_stats *vf_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_eth_stats *stats;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes = stats->rx_bytes;
	vf_stats->tx_bytes = stats->tx_bytes;
	vf_stats->broadcast = stats->rx_broadcast;
	vf_stats->multicast = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
 * @hw: hardware structure used to check the VLAN mode
 * @vlan_proto: VLAN TPID being checked
 *
 * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
 * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
 * Mode (SVM), then only ETH_P_8021Q is supported.
 */
static bool
ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
{
	bool is_supported = false;

	switch (vlan_proto) {
	case ETH_P_8021Q:
		is_supported = true;
		break;
	case ETH_P_8021AD:
		if (ice_is_dvm_ena(hw))
			is_supported = true;
		break;
	}

	return is_supported;
}

/**
 * ice_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * program VF Port VLAN ID and/or QoS
 */
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 local_vlan_proto = ntohs(vlan_proto);
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	if (vlan_id >= VLAN_N_VID || qos > 7) {
		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
			vf_id, vlan_id, qos);
		return -EINVAL;
	}

	if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
		dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
			local_vlan_proto);
		return -EPROTONOSUPPORT;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	if (ice_vf_get_port_vlan_prio(vf) == qos &&
	    ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
	    ice_vf_get_port_vlan_id(vf) == vlan_id) {
		/* duplicate request, so just return success */
		dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
			vlan_id, qos, local_vlan_proto);
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
	if (ice_vf_is_port_vlan_ena(vf))
		dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
			 vlan_id, qos, local_vlan_proto, vf_id);
	else
		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}
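
/* Usage sketch for the port VLAN callback above (the interface name is a
 * hypothetical example); an 802.1ad TPID is only accepted when the device
 * is in Double VLAN Mode, per ice_is_supported_port_vlan_proto():
 *
 *   ip link set eth0 vf 0 vlan 100 qos 3 proto 802.1ad
 *
 * Setting "vlan 0" clears the port VLAN again.
 */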

/**
 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dev_lan_addr.addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
		 ? "on" : "off");
}

/**
 * ice_print_vfs_mdd_events - print VFs malicious driver detect events
 * @pf: pointer to the PF structure
 *
 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* check that there are pending MDD events to print */
	if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
		return;

	pf->vfs.last_printed_mdd_jiffies = jiffies;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
				vf->mdd_rx_events.count;
			ice_print_vf_rx_mdd_event(vf);
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
				vf->mdd_tx_events.count;

			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
				 vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
				 vf->dev_lan_addr.addr);
		}
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
 * @pdev: pointer to a pci_dev structure
 *
 * Called when recovering from a PF FLR to restore interrupt capability to
 * the VFs.
 */
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
	u16 vf_id;
	int pos;

	if (!pci_num_vf(pdev))
		return;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		struct pci_dev *vfdev;

		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
				     &vf_id);
		vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
		while (vfdev) {
			if (vfdev->is_virtfn && vfdev->physfn == pdev)
				pci_restore_msi_state(vfdev);
			vfdev = pci_get_device(pdev->vendor, vf_id,
					       vfdev);
		}
	}
}

/**
 * ice_is_malicious_vf - helper function to detect a malicious VF
 * @pf: ptr to struct ice_pf
 * @event: pointer to the AQ event
 * @num_msg_proc: the number of messages processed so far
 * @num_msg_pending: the number of messages pending in admin queue
 */
bool
ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
		    u16 num_msg_proc, u16 num_msg_pending)
{
	s16 vf_id = le16_to_cpu(event->desc.retval);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_mbx_data mbxdata;
	bool malvf = false;
	struct ice_vf *vf;
	int status;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return false;

	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		goto out_put_vf;

	mbxdata.num_msg_proc = num_msg_proc;
	mbxdata.num_pending_arq = num_msg_pending;
	mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
#define ICE_MBX_OVERFLOW_WATERMARK 64
	mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;

	/* check to see if we have a malicious VF */
	status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
	if (status)
		goto out_put_vf;

	if (malvf) {
		bool report_vf = false;

		/* if the VF is malicious and we haven't let the user
		 * know about it, then let them know now
		 */
		status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs,
					      ICE_MAX_SRIOV_VFS, vf_id,
					      &report_vf);
		if (status)
			dev_dbg(dev, "Error reporting malicious VF\n");

		if (report_vf) {
			struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);

			if (pf_vsi)
				dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
					 &vf->dev_lan_addr.addr[0],
					 pf_vsi->netdev->dev_addr);
		}
	}

out_put_vf:
	ice_put_vf(vf);
	return malvf;
}