// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_vf_lib_private.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"

/**
 * ice_free_vf_entries - Free all VF entries from the hash table
 * @pf: pointer to the PF structure
 *
 * Iterate over the VF hash table, removing and releasing all VF entries.
 * Called during VF teardown or as cleanup during failed VF initialization.
 */
static void ice_free_vf_entries(struct ice_pf *pf)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct hlist_node *tmp;
	struct ice_vf *vf;
	unsigned int bkt;

	/* Remove all VFs from the hash table and release their main
	 * reference. Once all references to the VF are dropped, ice_put_vf()
	 * will call ice_release_vf which will remove the VF memory.
	 */
	lockdep_assert_held(&vfs->table_lock);

	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
		hash_del_rcu(&vf->entry);
		ice_put_vf(vf);
	}
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
	ice_vf_fdir_exit(vf);
	/* free VF control VSI */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings - disable a VF's MSI-X and queue mappings in hardware
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi))
		return;

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->vfs.num_msix_per - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	if (!pf)
		return -EINVAL;

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	if (!ice_has_vfs(pf))
		return;

	while (test_and_set_bit(ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	mutex_lock(&vfs->table_lock);

	ice_eswitch_release(pf);

	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_dis_vf_qs(vf);

		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(vf);
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			ice_free_vf_res(vf);
		}

		if (!pci_vfs_assigned(pf->pdev)) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}

		/* clear malicious info since the VF is getting released */
		list_del(&vf->mbx_info.list_entry);

		mutex_unlock(&vf->cfg_lock);
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	vfs->num_qps_per = 0;
	ice_free_vf_entries(pf);

	mutex_unlock(&vfs->table_lock);

	clear_bit(ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
	struct ice_vsi_cfg_params params = {};
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	params.type = ICE_VSI_VF;
	params.pi = ice_vf_get_port_info(vf);
	params.vf = vf;
	params.flags = ICE_VSI_FLAG_INIT;

	vsi = ice_vsi_setup(pf, &params);

	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
		ice_vf_invalidate_vsi(vf);
		return NULL;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	return vsi;
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this
 * VF. This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
	return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->vfs.num_msix_per) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		& VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to its functions */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}

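/* Illustrative note, not functional code: ice_ena_vf_msix_mappings() above
 * programs one vector range in two index spaces. With hypothetical values
 * sriov_base_vector = 100, num_msix_per = 17 and msix_vector_first_id = 1,
 * VF 0 owns PF-based vectors 100..116 (used for GLINT_VECT2FUNC), while
 * VPINT_ALLOC/VPINT_ALLOC_PCI are written with the device-based range
 * 101..117.
 */
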
/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	if (WARN_ON(!vsi))
		return;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to account for the OICR being the first MSIX */
	return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
		q_vector->v_idx + 1;
}

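/* Worked example with hypothetical numbers: for sriov_base_vector = 100 and
 * num_msix_per = 17, VF 2's vector block starts at 100 + 2 * 17 = 134.
 * Vector 134 is the VF's OICR/mailbox vector, so the q_vector with
 * v_idx = 0 lands on PF-based vector 135; that is the "+ 1" above.
 */
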
/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = pf->irq_tracker.num_entries;
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 * @num_vfs: the number of SR-IOV VFs being configured
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 * get more vectors and can enable more queues per VF. Note that this does not
 * grab any vectors from the SW pool already allocated. Also note, that all
 * vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	lockdep_assert_held(&pf->vfs.table_lock);

	if (!num_vfs)
		return -EINVAL;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		pf->irq_tracker.num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			num_vfs);
		return -ENOSPC;
	}

	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
	if (!avail_qs)
		num_txq = 0;
	else if (num_txq > avail_qs)
		num_txq = rounddown_pow_of_two(avail_qs);

	num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
	if (!avail_qs)
		num_rxq = 0;
	else if (num_rxq > avail_qs)
		num_rxq = rounddown_pow_of_two(avail_qs);

	if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, num_vfs);
		return -ENOSPC;
	}

	err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
	if (err) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
			num_vfs, err);
		return err;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
	pf->vfs.num_msix_per = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);

	return 0;
}

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the
 * VF VSI's broadcast filter and is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int err;

	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	err = ice_vf_init_host_cfg(vf, vsi);
	if (err)
		goto release_vsi;

	return 0;

release_vsi:
	ice_vf_vsi_release(vf);
	return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int bkt, it_cnt;
	struct ice_vf *vf;
	int retval;

	lockdep_assert_held(&pf->vfs.table_lock);

	it_cnt = 0;
	ice_for_each_vf(pf, bkt, vf) {
		vf->vf_ops->clear_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
		it_cnt++;
	}

	ice_flush(hw);
	return 0;

teardown:
	ice_for_each_vf(pf, bkt, vf) {
		if (it_cnt == 0)
			break;

		ice_dis_vf_mappings(vf);
		ice_vf_vsi_release(vf);
		it_cnt--;
	}

	return retval;
}

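/* Sizing example for ice_set_per_vf_res() above, using hypothetical totals:
 * with 480 MSI-X vectors left after the PF's own allocations, 40 VFs yield
 * 480 / 40 = 12 vectors per VF, which selects the small tier (5 vectors,
 * 4 queue pairs); 28 VFs yield 480 / 28 = 17, enough for the medium tier
 * (17 vectors, 16 queue pairs).
 */
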
/**
 * ice_sriov_free_vf - Free VF memory after all references are dropped
 * @vf: pointer to VF to free
 *
 * Called by ice_put_vf through ice_release_vf once the last reference to a VF
 * structure has been dropped.
 */
static void ice_sriov_free_vf(struct ice_vf *vf)
{
	mutex_destroy(&vf->cfg_lock);

	kfree_rcu(vf, rcu);
}

/**
 * ice_sriov_clear_reset_state - clears VF Reset status register
 * @vf: the VF to configure
 */
static void ice_sriov_clear_reset_state(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;

	/* Clear the reset status register so that VF immediately sees that
	 * the device is resetting, even if hardware hasn't yet gotten around
	 * to clearing VFGEN_RSTAT for us.
	 */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS);
}

/**
 * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
 * @vf: the VF to configure
 */
static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
	wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
}

/**
 * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF
 * @vf: pointer to VF structure
 * @is_vflr: true if reset occurred due to VFLR
 *
 * Trigger and cleanup after a VF reset for a SR-IOV VF.
 */
static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* In the case of a VFLR, HW has already reset the VF and we just need
	 * to clean up. Otherwise we must first trigger the reset using the
	 * VFRTRIG register.
	 */
	if (!is_vflr) {
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}

	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = vf_abs_id / 32;
	bit_idx = vf_abs_id % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

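/* Reset handshake summary: ice_sriov_trigger_reset_register() above sets
 * VPGEN_VFRTRIG.VFSWR (unless hardware already reset the VF via VFLR),
 * ice_sriov_poll_reset_status() below waits for VPGEN_VFRSTAT.VFRD to be
 * asserted, and ice_sriov_clear_reset_trigger() finally clears VFSWR so
 * the VF regains register access.
 */
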
/**
 * ice_sriov_poll_reset_status - poll SRIOV VF reset status
 * @vf: pointer to VF structure
 *
 * Returns true when reset is successful, else returns false
 */
static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	unsigned int i;
	u32 reg;

	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M)
			return true;

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}
	return false;
}

/**
 * ice_sriov_clear_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

/**
 * ice_sriov_create_vsi - Create a new VSI for a VF
 * @vf: VF to create the VSI for
 *
 * This is called by ice_vf_recreate_vsi to create the new VSI after the old
 * VSI has been released.
 */
static int ice_sriov_create_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi;

	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	return 0;
}

/**
 * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
{
	ice_ena_vf_mappings(vf);
	wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

static const struct ice_vf_ops ice_sriov_vf_ops = {
	.reset_type = ICE_VF_RESET,
	.free = ice_sriov_free_vf,
	.clear_reset_state = ice_sriov_clear_reset_state,
	.clear_mbx_register = ice_sriov_clear_mbx_register,
	.trigger_reset_register = ice_sriov_trigger_reset_register,
	.poll_reset_status = ice_sriov_poll_reset_status,
	.clear_reset_trigger = ice_sriov_clear_reset_trigger,
	.irq_close = NULL,
	.create_vsi = ice_sriov_create_vsi,
	.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};

/**
 * ice_create_vf_entries - Allocate and insert VF entries
 * @pf: pointer to the PF structure
 * @num_vfs: the number of VFs to allocate
 *
 * Allocate new VF entries and insert them into the hash table. Set some
 * basic default fields for initializing the new VFs.
 *
 * After this function exits, the hash table will have num_vfs entries
 * inserted.
 *
 * Returns 0 on success or an integer error code on failure.
 */
static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_vf *vf;
	u16 vf_id;
	int err;

	lockdep_assert_held(&vfs->table_lock);

	for (vf_id = 0; vf_id < num_vfs; vf_id++) {
		vf = kzalloc(sizeof(*vf), GFP_KERNEL);
		if (!vf) {
			err = -ENOMEM;
			goto err_free_entries;
		}
		kref_init(&vf->refcnt);

		vf->pf = pf;
		vf->vf_id = vf_id;

		/* set sriov vf ops for VFs created during SRIOV flow */
		vf->vf_ops = &ice_sriov_vf_ops;

		ice_initialize_vf_entry(vf);

		vf->vf_sw_id = pf->first_sw;

		hash_add_rcu(vfs->table, &vf->entry, vf_id);
	}

	return 0;

err_free_entries:
	ice_free_vf_entries(pf);
	return err;
}

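/* Lifetime note: each entry created by ice_create_vf_entries() starts with
 * a single kref held on behalf of the hash table. Lookups such as
 * ice_get_vf_by_id() take an extra reference that callers drop with
 * ice_put_vf(); the table's reference is dropped in ice_free_vf_entries(),
 * and the memory is freed via the .free op (ice_sriov_free_vf) once the
 * count reaches zero.
 */
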
/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 */
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret)
		goto err_unroll_intr;

	mutex_lock(&pf->vfs.table_lock);

	ret = ice_set_per_vf_res(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer VFs\n",
			num_vfs, ret);
		goto err_unroll_sriov;
	}

	ret = ice_create_vf_entries(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
			num_vfs);
		goto err_unroll_sriov;
	}

	ret = ice_start_vfs(pf);
	if (ret) {
		dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
		ret = -EAGAIN;
		goto err_unroll_vf_entries;
	}

	clear_bit(ICE_VF_DIS, pf->state);

	ret = ice_eswitch_configure(pf);
	if (ret) {
		dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
		goto err_unroll_sriov;
	}

	/* rearm global interrupts */
	if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	mutex_unlock(&pf->vfs.table_lock);

	return 0;

err_unroll_vf_entries:
	ice_free_vf_entries(pf);
err_unroll_sriov:
	mutex_unlock(&pf->vfs.table_lock);
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		ice_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return 0;

	if (num_vfs > pf->vfs.num_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->vfs.num_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Enabling %d VFs\n", num_vfs);
	err = ice_ena_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return 0;
}

/**
 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 * @pf: PF to enable SR-IOV on
 */
static int ice_check_sriov_allowed(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	return 0;
}

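/* Usage example: ice_sriov_configure() below backs the standard SR-IOV
 * sysfs attribute, e.g.
 *   echo 4 > /sys/class/net/<ifname>/device/sriov_numvfs
 * creates four VFs, and writing 0 frees them again (provided none are
 * currently assigned to VMs).
 */
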
/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate or 0 to free VFs
 *
 * This function is called when the user updates the number of VFs in sysfs. On
 * success return whatever num_vfs was set to by the caller. Return negative on
 * failure.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_check_sriov_allowed(pf);
	if (err)
		return err;

	if (!num_vfs) {
		if (!pci_vfs_assigned(pdev)) {
			ice_free_vfs(pf);
			if (pf->lag)
				ice_enable_lag(pf->lag);
			return 0;
		}

		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	err = ice_pci_sriov_ena(pf, num_vfs);
	if (err)
		return err;

	if (pf->lag)
		ice_disable_lag(pf->lag);
	return num_vfs;
}

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * Called from the VFLR IRQ handler to free up VF resources and state
 * variables.
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		u32 reg_idx, bit_idx;

		reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

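/* Worked example of the VFLR index math above, with hypothetical values:
 * for vf_base_id = 64 and vf_id = 5 the absolute VF ID is 69, so the
 * status bit lives in GLGEN_VFLRSTAT register 69 / 32 = 2, bit 69 % 32 = 5.
 */
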
/**
 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
 * @pf: PF used to index all VFs
 * @pfq: queue index relative to the PF's function space
 *
 * If no VF is found who owns the pfq then return NULL, otherwise return a
 * pointer to the VF who owns the pfq.
 *
 * If this function returns non-NULL, it acquires a reference count of the VF
 * structure. The caller is responsible for calling ice_put_vf() to drop this
 * reference.
 */
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		struct ice_vsi *vsi;
		u16 rxq_idx;

		vsi = ice_get_vf_vsi(vf);
		if (!vsi)
			continue;

		ice_for_each_rxq(vsi, rxq_idx)
			if (vsi->rxq_map[rxq_idx] == pfq) {
				struct ice_vf *found;

				if (kref_get_unless_zero(&vf->refcnt))
					found = vf;
				else
					found = NULL;
				rcu_read_unlock();
				return found;
			}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
 * @pf: PF used for conversion
 * @globalq: global queue index used to convert to PF space queue index
 */
static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
{
	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
}

/**
 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
 * @pf: PF that the LAN overflow event happened on
 * @event: structure holding the event information for the LAN overflow event
 *
 * Determine if the LAN overflow event was caused by a VF queue. If it was not
 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
 * reset on the offending VF.
 */
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 gldcb_rtctq, queue;
	struct ice_vf *vf;

	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);

	/* event returns device global Rx queue number */
	queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
		GLDCB_RTCTQ_RXQNUM_S;

	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
	if (!vf)
		return;

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
	ice_put_vf(vf);
}

/**
 * ice_set_vf_spoofchk - enable or disable VF spoof checking
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ena: flag to enable or disable feature
 */
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		netdev_err(netdev, "VSI %d for VF %d is null\n",
			   vf->lan_vsi_idx, vf->vf_id);
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf_vsi->type != ICE_VSI_VF) {
		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
		ret = -ENODEV;
		goto out_put_vf;
	}

	if (ena == vf->spoofchk) {
		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_vsi_apply_spoofchk(vf_vsi, ena);
	if (ret)
		dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d, error %d\n",
			ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
	else
		vf->spoofchk = ena;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_cfg - return VF configuration
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 */
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	ivi->vf = vf_id;
	ether_addr_copy(ivi->mac, vf->hw_lan_addr);

	/* VF configuration for VLAN and applicable QoS */
	ivi->vlan = ice_vf_get_port_vlan_id(vf);
	ivi->qos = ice_vf_get_port_vlan_prio(vf);
	if (ice_vf_is_port_vlan_ena(vf))
		ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));

	ivi->trusted = vf->trusted;
	ivi->spoofchk = vf->spoofchk;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_mac - program VF MAC address
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 */
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (is_multicast_ether_addr(mac)) {
		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
		return -EINVAL;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	/* nothing left to do, unicast MAC already set */
	if (ether_addr_equal(vf->dev_lan_addr, mac) &&
	    ether_addr_equal(vf->hw_lan_addr, mac)) {
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	mutex_lock(&vf->cfg_lock);

	/* VF is notified of its new MAC via the PF's response to the
	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
	 */
	ether_addr_copy(vf->dev_lan_addr, mac);
	ether_addr_copy(vf->hw_lan_addr, mac);
	if (is_zero_ether_addr(mac)) {
		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
		vf->pf_set_mac = false;
		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
			    vf->vf_id);
	} else {
		/* PF will add MAC rule for the VF */
		vf->pf_set_mac = true;
		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
			    mac, vf_id);
	}

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_trust - enable or disable a given VF as trusted
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	if (ice_is_eswitch_mode_switchdev(pf)) {
		dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
		/* drop the reference taken by ice_get_vf_by_id() above */
		ret = -EOPNOTSUPP;
		goto out_put_vf;
	}

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	/* Check if already trusted */
	if (trusted == vf->trusted) {
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->trusted = trusted;
	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_link_state - set VF link state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set VF's link state, irrespective of physical link state status
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		break;
	default:
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_vc_notify_vf_link_state(vf);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
 * @pf: PF associated with VFs
 */
static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	int rate = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		rate += vf->min_tx_rate;
	rcu_read_unlock();

	return rate;
}

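/* Oversubscription example with hypothetical numbers: on a 25000 Mbps link
 * where the other VFs' min rates already sum to 20000 Mbps, requesting
 * min_tx_rate = 6000 Mbps is rejected (20000 + 6000 exceeds the link by
 * 1000 Mbps), while anything up to 5000 Mbps is allowed.
 */
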
/**
 * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
 * @vf: VF trying to configure min_tx_rate
 * @min_tx_rate: min Tx rate in Mbps
 *
 * Check if the min_tx_rate being passed in will cause oversubscription of total
 * min_tx_rate based on the current link speed and all other VFs configured
 * min_tx_rate
 *
 * Return true if the passed min_tx_rate would cause oversubscription, else
 * return false
 */
static bool
ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int all_vfs_min_tx_rate;
	int link_speed_mbps;

	if (WARN_ON(!vsi))
		return false;

	link_speed_mbps = ice_get_link_speed_mbps(vsi);
	all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);

	/* this VF's previous rate is being overwritten */
	all_vfs_min_tx_rate -= vf->min_tx_rate;

	if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
		dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
			min_tx_rate, vf->vf_id,
			all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
			link_speed_mbps);
		return true;
	}

	return false;
}

/**
 * ice_set_vf_bw - set min/max VF bandwidth
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate in Mbps
 * @max_tx_rate: Maximum Tx rate in Mbps
 */
int
ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
	      int max_tx_rate)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (min_tx_rate && ice_is_dcb_active(pf)) {
		dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
		ret = -EOPNOTSUPP;
		goto out_put_vf;
	}

	if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
		ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->min_tx_rate = min_tx_rate;
	}

	if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
		ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->max_tx_rate = max_tx_rate;
	}

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-255)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int ice_get_vf_stats(struct net_device *netdev, int vf_id,
		     struct ifla_vf_stats *vf_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_eth_stats *stats;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes = stats->rx_bytes;
	vf_stats->tx_bytes = stats->tx_bytes;
	vf_stats->broadcast = stats->rx_broadcast;
	vf_stats->multicast = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
 * @hw: hardware structure used to check the VLAN mode
 * @vlan_proto: VLAN TPID being checked
 *
 * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
 * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
 * Mode (SVM), then only ETH_P_8021Q is supported.
 */
static bool
ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
{
	bool is_supported = false;

	switch (vlan_proto) {
	case ETH_P_8021Q:
		is_supported = true;
		break;
	case ETH_P_8021AD:
		if (ice_is_dvm_ena(hw))
			is_supported = true;
		break;
	}

	return is_supported;
}

/**
 * ice_set_vf_port_vlan - program VF Port VLAN ID and/or QoS
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 */
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 local_vlan_proto = ntohs(vlan_proto);
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	if (vlan_id >= VLAN_N_VID || qos > 7) {
		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
			vf_id, vlan_id, qos);
		return -EINVAL;
	}

	if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
		dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
			local_vlan_proto);
		return -EPROTONOSUPPORT;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	if (ice_vf_get_port_vlan_prio(vf) == qos &&
	    ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
	    ice_vf_get_port_vlan_id(vf) == vlan_id) {
		/* duplicate request, so just return success */
		dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
			vlan_id, qos, local_vlan_proto);
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
	if (ice_vf_is_port_vlan_ena(vf))
		dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
			 vlan_id, qos, local_vlan_proto, vf_id);
	else
		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dev_lan_addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
		 ? "on" : "off");
}

/**
 * ice_print_vfs_mdd_events - print VFs malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* check that there are pending MDD events to print */
	if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
		return;

	pf->vfs.last_printed_mdd_jiffies = jiffies;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
				vf->mdd_rx_events.count;
			ice_print_vf_rx_mdd_event(vf);
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
				vf->mdd_tx_events.count;

			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
				 vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
				 vf->dev_lan_addr);
		}
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
 * @pdev: pointer to a pci_dev structure
 *
 * Called when recovering from a PF FLR to restore interrupt capability to
 * the VFs.
 */
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
	u16 vf_id;
	int pos;

	if (!pci_num_vf(pdev))
		return;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		struct pci_dev *vfdev;

		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
				     &vf_id);
		vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
		while (vfdev) {
			if (vfdev->is_virtfn && vfdev->physfn == pdev)
				pci_restore_msi_state(vfdev);
			vfdev = pci_get_device(pdev->vendor, vf_id,
					       vfdev);
		}
	}
}