// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_vf_lib_private.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"

/**
 * ice_free_vf_entries - Free all VF entries from the hash table
 * @pf: pointer to the PF structure
 *
 * Iterate over the VF hash table, removing and releasing all VF entries.
 * Called during VF teardown or as cleanup during failed VF initialization.
 */
static void ice_free_vf_entries(struct ice_pf *pf)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct hlist_node *tmp;
	struct ice_vf *vf;
	unsigned int bkt;

	/* Remove all VFs from the hash table and release their main
	 * reference. Once all references to the VF are dropped, ice_put_vf()
	 * will call ice_release_vf which will remove the VF memory.
	 */
	lockdep_assert_held(&vfs->table_lock);

	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
		hash_del_rcu(&vf->entry);
		ice_deinitialize_vf_entry(vf);
		ice_put_vf(vf);
	}
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
	ice_vf_fdir_exit(vf);
	/* free VF control VSI */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;
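	/* vectors first_vector_idx..last_vector_idx are the num_msix_per
	 * contiguous PF-space vectors owned by this VF
	 */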

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings - disable VF MSIX and queue mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi))
		return;

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->vfs.num_msix_per - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	if (!pf)
		return -EINVAL;

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	if (!ice_has_vfs(pf))
		return;

	while (test_and_set_bit(ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	mutex_lock(&vfs->table_lock);

	ice_eswitch_release(pf);

	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_dis_vf_qs(vf);

		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(vf);
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			ice_free_vf_res(vf);
		}

		if (!pci_vfs_assigned(pf->pdev)) {
			u32 reg_idx, bit_idx;

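			/* VFLR status is packed 32 VFs per register; write 1
			 * to this VF's bit to clear any stale VFLR indication
			 */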
			reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}

		mutex_unlock(&vf->cfg_lock);
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	vfs->num_qps_per = 0;
	ice_free_vf_entries(pf);

	mutex_unlock(&vfs->table_lock);

	clear_bit(ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
	struct ice_vsi_cfg_params params = {};
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	params.type = ICE_VSI_VF;
	params.pi = ice_vf_get_port_info(vf);
	params.vf = vf;
	params.flags = ICE_VSI_FLAG_INIT;

	vsi = ice_vsi_setup(pf, &params);

	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
		ice_vf_invalidate_vsi(vf);
		return NULL;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	return vsi;
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this VF.
 * This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
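	/* e.g. with sriov_base_vector = 96 and 17 vectors per VF, VF 2 owns
	 * PF-space vectors 96 + 2 * 17 = 130 through 146, with vector 130
	 * being its OICR
	 */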
	return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->vfs.num_msix_per) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

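	/* VPINT_ALLOC and VPINT_ALLOC_PCI take the device-global vector
	 * numbers computed above, while GLINT_VECT2FUNC below is indexed by
	 * the PF-relative vector number
	 */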
	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		& VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to their functions */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	if (WARN_ON(!vsi))
		return;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to account for the OICR being the first MSIX */
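	/* e.g. with sriov_base_vector = 96 and 17 vectors per VF, q_vector 0
	 * of VF 2 maps to register index 96 + 17 * 2 + 0 + 1 = 131 (vector
	 * 130 is that VF's OICR)
	 */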
	return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
	       q_vector->v_idx + 1;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = ice_get_max_used_msix_vector(pf);
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;
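	/* e.g. with 1024 total vectors and 8 VFs at 17 vectors each, the
	 * SR-IOV block starts at vector 1024 - 136 = 888 and runs to the top
	 * of the vector space
	 */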

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 * @num_vfs: the number of SR-IOV VFs being configured
 *
 * First, determine HW interrupts from the common pool. If we allocate fewer
 * VFs, we get more vectors and can enable more queues per VF. Note that this
 * does not grab any vectors from the SW pool already allocated. Also note,
 * that all vector counts include one for each VF's miscellaneous interrupt
 * vector (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
	int vectors_used = ice_get_max_used_msix_vector(pf);
	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	lockdep_assert_held(&pf->vfs.table_lock);

	if (!num_vfs)
		return -EINVAL;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		vectors_used;
	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			num_vfs);
		return -ENOSPC;
	}

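	/* Give each VF one queue per data vector (the OICR vector is excluded
	 * via ICE_NONQ_VECS_VF); if the shared queue pool cannot cover that,
	 * fall back to the largest power of two that fits
	 */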
	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
	if (!avail_qs)
		num_txq = 0;
	else if (num_txq > avail_qs)
		num_txq = rounddown_pow_of_two(avail_qs);

	num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
	if (!avail_qs)
		num_rxq = 0;
	else if (num_rxq > avail_qs)
		num_rxq = rounddown_pow_of_two(avail_qs);

	if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, num_vfs);
		return -ENOSPC;
	}

	err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
	if (err) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
			num_vfs, err);
		return err;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
	pf->vfs.num_msix_per = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);

	return 0;
}

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
 * the VF VSI's broadcast filter. It is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int err;

	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	err = ice_vf_init_host_cfg(vf, vsi);
	if (err)
		goto release_vsi;

	return 0;

release_vsi:
	ice_vf_vsi_release(vf);
	return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int bkt, it_cnt;
	struct ice_vf *vf;
	int retval;

	lockdep_assert_held(&pf->vfs.table_lock);

	it_cnt = 0;
	ice_for_each_vf(pf, bkt, vf) {
		vf->vf_ops->clear_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
		it_cnt++;
	}

	ice_flush(hw);
	return 0;

teardown:
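	/* unwind only the VFs that were successfully started above */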
	ice_for_each_vf(pf, bkt, vf) {
		if (it_cnt == 0)
			break;

		ice_dis_vf_mappings(vf);
		ice_vf_vsi_release(vf);
		it_cnt--;
	}

	return retval;
}

/**
 * ice_sriov_free_vf - Free VF memory after all references are dropped
 * @vf: pointer to VF to free
 *
 * Called by ice_put_vf through ice_release_vf once the last reference to a VF
 * structure has been dropped.
 */
static void ice_sriov_free_vf(struct ice_vf *vf)
{
	mutex_destroy(&vf->cfg_lock);

	kfree_rcu(vf, rcu);
}

/**
 * ice_sriov_clear_reset_state - clears VF Reset status register
 * @vf: the VF to configure
 */
static void ice_sriov_clear_reset_state(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;

	/* Clear the reset status register so that VF immediately sees that
	 * the device is resetting, even if hardware hasn't yet gotten around
	 * to clearing VFGEN_RSTAT for us.
	 */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS);
}

/**
 * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
 * @vf: the VF to configure
 */
static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
	wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
}

/**
 * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF
 * @vf: pointer to VF structure
 * @is_vflr: true if reset occurred due to VFLR
 *
 * Trigger and cleanup after a VF reset for a SR-IOV VF.
 */
static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* In the case of a VFLR, HW has already reset the VF and we just need
	 * to clean up. Otherwise we must first trigger the reset using the
	 * VFRTRIG register.
	 */
	if (!is_vflr) {
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}

	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

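	/* Poll the VF's Device Status through the PF's indirect config access
	 * registers and wait for pending PCI transactions (VF_TRANS_PENDING_M
	 * in the value read back via PF_PCI_CIAD) to finish
	 */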
	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

/**
 * ice_sriov_poll_reset_status - poll SRIOV VF reset status
 * @vf: pointer to VF structure
 *
 * Returns true when reset is successful, else returns false
 */
static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	unsigned int i;
	u32 reg;

	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M)
			return true;

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}
	return false;
}

/**
 * ice_sriov_clear_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

/**
 * ice_sriov_create_vsi - Create a new VSI for a VF
 * @vf: VF to create the VSI for
 *
 * This is called by ice_vf_recreate_vsi to create the new VSI after the old
 * VSI has been released.
 */
static int ice_sriov_create_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi;

	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	return 0;
}

/**
 * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
{
	ice_ena_vf_mappings(vf);
	wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

static const struct ice_vf_ops ice_sriov_vf_ops = {
	.reset_type = ICE_VF_RESET,
	.free = ice_sriov_free_vf,
	.clear_reset_state = ice_sriov_clear_reset_state,
	.clear_mbx_register = ice_sriov_clear_mbx_register,
	.trigger_reset_register = ice_sriov_trigger_reset_register,
	.poll_reset_status = ice_sriov_poll_reset_status,
	.clear_reset_trigger = ice_sriov_clear_reset_trigger,
	.irq_close = NULL,
	.create_vsi = ice_sriov_create_vsi,
	.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};

/**
 * ice_create_vf_entries - Allocate and insert VF entries
 * @pf: pointer to the PF structure
 * @num_vfs: the number of VFs to allocate
 *
 * Allocate new VF entries and insert them into the hash table. Set some
 * basic default fields for initializing the new VFs.
 *
 * After this function exits, the hash table will have num_vfs entries
 * inserted.
 *
 * Returns 0 on success or an integer error code on failure.
 */
static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_vf *vf;
	u16 vf_id;
	int err;

	lockdep_assert_held(&vfs->table_lock);

	for (vf_id = 0; vf_id < num_vfs; vf_id++) {
		vf = kzalloc(sizeof(*vf), GFP_KERNEL);
		if (!vf) {
			err = -ENOMEM;
			goto err_free_entries;
		}
		kref_init(&vf->refcnt);

		vf->pf = pf;
		vf->vf_id = vf_id;

		/* set sriov vf ops for VFs created during SRIOV flow */
		vf->vf_ops = &ice_sriov_vf_ops;

		ice_initialize_vf_entry(vf);

		vf->vf_sw_id = pf->first_sw;

		hash_add_rcu(vfs->table, &vf->entry, vf_id);
	}

	return 0;

err_free_entries:
	ice_free_vf_entries(pf);
	return err;
}

/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 */
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret)
		goto err_unroll_intr;

	mutex_lock(&pf->vfs.table_lock);

	ret = ice_set_per_vf_res(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer VFs\n",
			num_vfs, ret);
		goto err_unroll_sriov;
	}

	ret = ice_create_vf_entries(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
			num_vfs);
		goto err_unroll_sriov;
	}

	ret = ice_start_vfs(pf);
	if (ret) {
		dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
		ret = -EAGAIN;
		goto err_unroll_vf_entries;
	}

	clear_bit(ICE_VF_DIS, pf->state);

	ret = ice_eswitch_configure(pf);
	if (ret) {
		dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
		goto err_unroll_sriov;
	}

	/* rearm global interrupts */
	if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	mutex_unlock(&pf->vfs.table_lock);

	return 0;

err_unroll_vf_entries:
	ice_free_vf_entries(pf);
err_unroll_sriov:
	mutex_unlock(&pf->vfs.table_lock);
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (!num_vfs) {
		ice_free_vfs(pf);
		return 0;
	}

	if (num_vfs > pf->vfs.num_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->vfs.num_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Enabling %d VFs\n", num_vfs);
	err = ice_ena_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return 0;
}

/**
 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 * @pf: PF to enable SR-IOV on
 */
static int ice_check_sriov_allowed(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate or 0 to free VFs
 *
 * This function is called when the user updates the number of VFs in sysfs. On
 * success return whatever num_vfs was set to by the caller. Return negative on
 * failure.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_check_sriov_allowed(pf);
	if (err)
		return err;

	if (!num_vfs) {
		if (!pci_vfs_assigned(pdev)) {
			ice_free_vfs(pf);
			return 0;
		}

		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	err = ice_pci_sriov_ena(pf, num_vfs);
	if (err)
		return err;

	return num_vfs;
}

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * Called from the VFLR IRQ handler to free up VF resources and state variables
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		u32 reg_idx, bit_idx;

		reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
 * @pf: PF used to index all VFs
 * @pfq: queue index relative to the PF's function space
 *
 * If no VF is found who owns the pfq then return NULL, otherwise return a
 * pointer to the VF who owns the pfq
 *
 * If this function returns non-NULL, it acquires a reference count of the VF
 * structure. The caller is responsible for calling ice_put_vf() to drop this
 * reference.
 */
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
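	/* Walk the table under RCU; a matching VF must have its refcount
	 * raised with kref_get_unless_zero() before the RCU read lock is
	 * dropped, otherwise it could be freed out from under the caller
	 */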
	ice_for_each_vf_rcu(pf, bkt, vf) {
		struct ice_vsi *vsi;
		u16 rxq_idx;

		vsi = ice_get_vf_vsi(vf);
		if (!vsi)
			continue;

		ice_for_each_rxq(vsi, rxq_idx)
			if (vsi->rxq_map[rxq_idx] == pfq) {
				struct ice_vf *found;

				if (kref_get_unless_zero(&vf->refcnt))
					found = vf;
				else
					found = NULL;
				rcu_read_unlock();
				return found;
			}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
 * @pf: PF used for conversion
 * @globalq: global queue index used to convert to PF space queue index
 */
static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
{
	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
}

/**
 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
 * @pf: PF that the LAN overflow event happened on
 * @event: structure holding the event information for the LAN overflow event
 *
 * Determine if the LAN overflow event was caused by a VF queue. If it was not
 * caused by a VF, do nothing. If a VF caused this LAN overflow event, trigger
 * a reset on the offending VF.
 */
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 gldcb_rtctq, queue;
	struct ice_vf *vf;

	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);

	/* event returns device global Rx queue number */
	queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
		GLDCB_RTCTQ_RXQNUM_S;

	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
	if (!vf)
		return;

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
	ice_put_vf(vf);
}

/**
 * ice_set_vf_spoofchk - enable or disable VF spoof checking
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ena: flag to enable or disable feature
 */
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		netdev_err(netdev, "VSI %d for VF %d is null\n",
			   vf->lan_vsi_idx, vf->vf_id);
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf_vsi->type != ICE_VSI_VF) {
		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
		ret = -ENODEV;
		goto out_put_vf;
	}

	if (ena == vf->spoofchk) {
		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_vsi_apply_spoofchk(vf_vsi, ena);
	if (ret)
		dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d, error %d\n",
			ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
	else
		vf->spoofchk = ena;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_cfg - return VF configuration
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 */
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	ivi->vf = vf_id;
	ether_addr_copy(ivi->mac, vf->hw_lan_addr);

	/* VF configuration for VLAN and applicable QoS */
	ivi->vlan = ice_vf_get_port_vlan_id(vf);
	ivi->qos = ice_vf_get_port_vlan_prio(vf);
	if (ice_vf_is_port_vlan_ena(vf))
		ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));

	ivi->trusted = vf->trusted;
	ivi->spoofchk = vf->spoofchk;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_mac - program VF MAC address
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 */
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (is_multicast_ether_addr(mac)) {
		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
		return -EINVAL;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	/* nothing left to do, unicast MAC already set */
	if (ether_addr_equal(vf->dev_lan_addr, mac) &&
	    ether_addr_equal(vf->hw_lan_addr, mac)) {
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	mutex_lock(&vf->cfg_lock);

	/* VF is notified of its new MAC via the PF's response to the
	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
	 */
	ether_addr_copy(vf->dev_lan_addr, mac);
	ether_addr_copy(vf->hw_lan_addr, mac);
	if (is_zero_ether_addr(mac)) {
		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
		vf->pf_set_mac = false;
		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
			    vf->vf_id);
	} else {
		/* PF will add MAC rule for the VF */
		vf->pf_set_mac = true;
		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
			    mac, vf_id);
	}

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_trust - enable or disable a given VF as trusted
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	if (ice_is_eswitch_mode_switchdev(pf)) {
		dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
		/* drop the reference taken by ice_get_vf_by_id() rather than
		 * returning directly, which would leak it
		 */
		ret = -EOPNOTSUPP;
		goto out_put_vf;
	}

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	/* Check if already trusted */
	if (trusted == vf->trusted) {
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->trusted = trusted;
	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_link_state - set VF's link state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set VF's link state, irrespective of physical link state status
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		break;
	default:
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_vc_notify_vf_link_state(vf);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
 * @pf: PF associated with VFs
 */
static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	int rate = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		rate += vf->min_tx_rate;
	rcu_read_unlock();

	return rate;
}

/**
 * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
 * @vf: VF trying to configure min_tx_rate
 * @min_tx_rate: min Tx rate in Mbps
 *
 * Check if the min_tx_rate being passed in will cause oversubscription of the
 * total min_tx_rate based on the current link speed and all other VFs'
 * configured min_tx_rate
 *
 * Return true if the passed min_tx_rate would cause oversubscription, else
 * return false
 */
static bool
ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int all_vfs_min_tx_rate;
	int link_speed_mbps;

	if (WARN_ON(!vsi))
		return false;

	link_speed_mbps = ice_get_link_speed_mbps(vsi);
	all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);

	/* this VF's previous rate is being overwritten */
	all_vfs_min_tx_rate -= vf->min_tx_rate;

	if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
		dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
			min_tx_rate, vf->vf_id,
			all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
			link_speed_mbps);
		return true;
	}

	return false;
}

/**
 * ice_set_vf_bw - set min/max VF bandwidth
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate in Mbps
 * @max_tx_rate: Maximum Tx rate in Mbps
 */
int
ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
	      int max_tx_rate)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (min_tx_rate && ice_is_dcb_active(pf)) {
		dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
		ret = -EOPNOTSUPP;
		goto out_put_vf;
	}

	if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
		ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->min_tx_rate = min_tx_rate;
	}

	if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
		ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->max_tx_rate = max_tx_rate;
	}

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-255)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int ice_get_vf_stats(struct net_device *netdev, int vf_id,
		     struct ifla_vf_stats *vf_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_eth_stats *stats;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes = stats->rx_bytes;
	vf_stats->tx_bytes = stats->tx_bytes;
	vf_stats->broadcast = stats->rx_broadcast;
	vf_stats->multicast = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
 * @hw: hardware structure used to check the VLAN mode
 * @vlan_proto: VLAN TPID being checked
 *
 * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
 * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
 * Mode (SVM), then only ETH_P_8021Q is supported.
 */
static bool
ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
{
	bool is_supported = false;

	switch (vlan_proto) {
	case ETH_P_8021Q:
		is_supported = true;
		break;
	case ETH_P_8021AD:
		if (ice_is_dvm_ena(hw))
			is_supported = true;
		break;
	}

	return is_supported;
}

/**
 * ice_set_vf_port_vlan - program VF port VLAN ID and/or QoS
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 */
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 local_vlan_proto = ntohs(vlan_proto);
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

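	/* qos carries the 3-bit 802.1p priority, so only values 0-7 are
	 * valid
	 */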
	if (vlan_id >= VLAN_N_VID || qos > 7) {
		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
			vf_id, vlan_id, qos);
		return -EINVAL;
	}

	if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
		dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
			local_vlan_proto);
		return -EPROTONOSUPPORT;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	if (ice_vf_get_port_vlan_prio(vf) == qos &&
	    ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
	    ice_vf_get_port_vlan_id(vf) == vlan_id) {
		/* duplicate request, so just return success */
		dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
			vlan_id, qos, local_vlan_proto);
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
	if (ice_vf_is_port_vlan_ena(vf))
		dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
			 vlan_id, qos, local_vlan_proto, vf_id);
	else
		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dev_lan_addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
		 ? "on" : "off");
}

/**
 * ice_print_vfs_mdd_events - print VFs' malicious driver detect events
 * @pf: pointer to the PF structure
 *
 * Called from ice_handle_mdd_event to rate limit and print VFs' MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* check that there are pending MDD events to print */
	if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
		return;

	pf->vfs.last_printed_mdd_jiffies = jiffies;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
				vf->mdd_rx_events.count;
			ice_print_vf_rx_mdd_event(vf);
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
				vf->mdd_tx_events.count;

			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
				 vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
				 vf->dev_lan_addr);
		}
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
 * @pdev: pointer to a pci_dev structure
 *
 * Called when recovering from a PF FLR to restore interrupt capability to
 * the VFs.
 */
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
	u16 vf_id;
	int pos;

	if (!pci_num_vf(pdev))
		return;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		struct pci_dev *vfdev;

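		/* read the VF device ID out of the SR-IOV capability, then
		 * walk every matching VF device that hangs off this PF
		 */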
		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
				     &vf_id);
		vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
		while (vfdev) {
			if (vfdev->is_virtfn && vfdev->physfn == pdev)
				pci_restore_msi_state(vfdev);
			vfdev = pci_get_device(pdev->vendor, vf_id,
					       vfdev);
		}
	}
}