xref: /openbmc/linux/drivers/net/ethernet/intel/ice/ice_vf_lib.c (revision 0f9b4c3ca5fdf3e177266ef994071b1a03f07318)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3 
4 #include "ice_vf_lib_private.h"
5 #include "ice.h"
6 #include "ice_lib.h"
7 #include "ice_fltr.h"
8 #include "ice_virtchnl_allowlist.h"
9 
10 /* Public functions which may be accessed by all driver files */
11 
12 /**
13  * ice_get_vf_by_id - Get pointer to VF by ID
14  * @pf: the PF private structure
15  * @vf_id: the VF ID to locate
16  *
17  * Locate and return a pointer to the VF structure associated with a given ID.
18  * Returns NULL if the ID does not have a valid VF structure associated with
19  * it.
20  *
21  * This function takes a reference to the VF, which must be released by
22  * calling ice_put_vf() once the caller is finished accessing the VF structure
23  * returned.
24  */
ice_get_vf_by_id(struct ice_pf * pf,u16 vf_id)25 struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
26 {
27 	struct ice_vf *vf;
28 
29 	rcu_read_lock();
30 	hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
31 		if (vf->vf_id == vf_id) {
32 			struct ice_vf *found;
33 
34 			if (kref_get_unless_zero(&vf->refcnt))
35 				found = vf;
36 			else
37 				found = NULL;
38 
39 			rcu_read_unlock();
40 			return found;
41 		}
42 	}
43 	rcu_read_unlock();
44 
45 	return NULL;
46 }
47 
/**
 * ice_release_vf - Release VF associated with a refcount
 * @ref: the kref that has just dropped to zero
 *
 * Callback function for kref_put to release a VF once its reference count has
 * hit zero. Delegates the actual teardown to the VF-specific free op.
 */
static void ice_release_vf(struct kref *ref)
{
	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);

	vf->vf_ops->free(vf);
}
61 
/**
 * ice_put_vf - Release a reference to a VF
 * @vf: the VF structure to decrease reference count on
 *
 * Drop one reference previously taken with ice_get_vf_by_id(); when the
 * count reaches zero the entry is freed via ice_release_vf().
 *
 * This must be called after ice_get_vf_by_id() once the reference to the VF
 * structure is no longer used. Otherwise, the VF structure will never be
 * freed.
 */
void ice_put_vf(struct ice_vf *vf)
{
	kref_put(&vf->refcnt, ice_release_vf);
}
77 
/**
 * ice_has_vfs - Return true if the PF has any associated VFs
 * @pf: the PF private structure
 *
 * Return whether or not the PF has any allocated VFs.
 *
 * Note that this function only guarantees that there are no VFs at the point
 * of calling it. It does not guarantee that no more VFs will be added.
 */
bool ice_has_vfs(struct ice_pf *pf)
{
	/* A simple check that the hash table is not empty does not require
	 * the mutex or rcu_read_lock.
	 */
	return !hash_empty(pf->vfs.table);
}
94 
95 /**
96  * ice_get_num_vfs - Get number of allocated VFs
97  * @pf: the PF private structure
98  *
99  * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
100  * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
101  * the output of this function.
102  */
ice_get_num_vfs(struct ice_pf * pf)103 u16 ice_get_num_vfs(struct ice_pf *pf)
104 {
105 	struct ice_vf *vf;
106 	unsigned int bkt;
107 	u16 num_vfs = 0;
108 
109 	rcu_read_lock();
110 	ice_for_each_vf_rcu(pf, bkt, vf)
111 		num_vfs++;
112 	rcu_read_unlock();
113 
114 	return num_vfs;
115 }
116 
117 /**
118  * ice_get_vf_vsi - get VF's VSI based on the stored index
119  * @vf: VF used to get VSI
120  */
ice_get_vf_vsi(struct ice_vf * vf)121 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
122 {
123 	if (vf->lan_vsi_idx == ICE_NO_VSI)
124 		return NULL;
125 
126 	return vf->pf->vsi[vf->lan_vsi_idx];
127 }
128 
129 /**
130  * ice_is_vf_disabled
131  * @vf: pointer to the VF info
132  *
133  * If the PF has been disabled, there is no need resetting VF until PF is
134  * active again. Similarly, if the VF has been disabled, this means something
135  * else is resetting the VF, so we shouldn't continue.
136  *
137  * Returns true if the caller should consider the VF as disabled whether
138  * because that single VF is explicitly disabled or because the PF is
139  * currently disabled.
140  */
ice_is_vf_disabled(struct ice_vf * vf)141 bool ice_is_vf_disabled(struct ice_vf *vf)
142 {
143 	struct ice_pf *pf = vf->pf;
144 
145 	return (test_bit(ICE_VF_DIS, pf->state) ||
146 		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
147 }
148 
149 /**
150  * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
151  * @vf: The VF being resseting
152  *
153  * The max poll time is about ~800ms, which is about the maximum time it takes
154  * for a VF to be reset and/or a VF driver to be removed.
155  */
ice_wait_on_vf_reset(struct ice_vf * vf)156 static void ice_wait_on_vf_reset(struct ice_vf *vf)
157 {
158 	int i;
159 
160 	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
161 		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
162 			break;
163 		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
164 	}
165 }
166 
167 /**
168  * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
169  * @vf: VF to check if it's ready to be configured/queried
170  *
171  * The purpose of this function is to make sure the VF is not in reset, not
172  * disabled, and initialized so it can be configured and/or queried by a host
173  * administrator.
174  */
ice_check_vf_ready_for_cfg(struct ice_vf * vf)175 int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
176 {
177 	ice_wait_on_vf_reset(vf);
178 
179 	if (ice_is_vf_disabled(vf))
180 		return -EINVAL;
181 
182 	if (ice_check_vf_init(vf))
183 		return -EBUSY;
184 
185 	return 0;
186 }
187 
/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
	 * needs to clear them in the case of VFR/VFLR. If this is done for
	 * PFR, it can mess up VF resets because the VF driver may already
	 * have started cleanup by the time we get here.
	 */
	if (!is_pfr)
		vf->vf_ops->clear_mbx_register(vf);

	/* Finally kick off the reset in hardware via the VF-specific op */
	vf->vf_ops->trigger_reset_register(vf, is_vflr);
}
218 
ice_vf_clear_counters(struct ice_vf * vf)219 static void ice_vf_clear_counters(struct ice_vf *vf)
220 {
221 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
222 
223 	if (vsi)
224 		vsi->num_vlan = 0;
225 
226 	vf->num_mac = 0;
227 	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
228 	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
229 }
230 
231 /**
232  * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
233  * @vf: VF to perform pre VSI rebuild tasks
234  *
235  * These tasks are items that don't need to be amortized since they are most
236  * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
237  */
ice_vf_pre_vsi_rebuild(struct ice_vf * vf)238 static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
239 {
240 	/* Close any IRQ mapping now */
241 	if (vf->vf_ops->irq_close)
242 		vf->vf_ops->irq_close(vf);
243 
244 	ice_vf_clear_counters(vf);
245 	vf->vf_ops->clear_reset_trigger(vf);
246 }
247 
248 /**
249  * ice_vf_recreate_vsi - Release and re-create the VF's VSI
250  * @vf: VF to recreate the VSI for
251  *
252  * This is only called when a single VF is being reset (i.e. VVF, VFLR, host
253  * VF configuration change, etc)
254  *
255  * It releases and then re-creates a new VSI.
256  */
ice_vf_recreate_vsi(struct ice_vf * vf)257 static int ice_vf_recreate_vsi(struct ice_vf *vf)
258 {
259 	struct ice_pf *pf = vf->pf;
260 	int err;
261 
262 	ice_vf_vsi_release(vf);
263 
264 	err = vf->vf_ops->create_vsi(vf);
265 	if (err) {
266 		dev_err(ice_pf_to_dev(pf),
267 			"Failed to recreate the VF%u's VSI, error %d\n",
268 			vf->vf_id, err);
269 		return err;
270 	}
271 
272 	return 0;
273 }
274 
275 /**
276  * ice_vf_rebuild_vsi - rebuild the VF's VSI
277  * @vf: VF to rebuild the VSI for
278  *
279  * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
280  * host, PFR, CORER, etc.).
281  *
282  * It reprograms the VSI configuration back into hardware.
283  */
ice_vf_rebuild_vsi(struct ice_vf * vf)284 static int ice_vf_rebuild_vsi(struct ice_vf *vf)
285 {
286 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
287 	struct ice_pf *pf = vf->pf;
288 
289 	if (WARN_ON(!vsi))
290 		return -EINVAL;
291 
292 	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
293 		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
294 			vf->vf_id);
295 		return -EIO;
296 	}
297 	/* vsi->idx will remain the same in this case so don't update
298 	 * vf->lan_vsi_idx
299 	 */
300 	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
301 	vf->lan_vsi_num = vsi->vsi_num;
302 
303 	return 0;
304 }
305 
306 /**
307  * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
308  * @vf: VF to add MAC filters for
309  * @vsi: Pointer to VSI
310  *
311  * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
312  * always re-adds either a VLAN 0 or port VLAN based filter after reset.
313  */
ice_vf_rebuild_host_vlan_cfg(struct ice_vf * vf,struct ice_vsi * vsi)314 static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
315 {
316 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
317 	struct device *dev = ice_pf_to_dev(vf->pf);
318 	int err;
319 
320 	if (ice_vf_is_port_vlan_ena(vf)) {
321 		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
322 		if (err) {
323 			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
324 				vf->vf_id, err);
325 			return err;
326 		}
327 
328 		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
329 	} else {
330 		err = ice_vsi_add_vlan_zero(vsi);
331 	}
332 
333 	if (err) {
334 		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
335 			ice_vf_is_port_vlan_ena(vf) ?
336 			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
337 		return err;
338 	}
339 
340 	err = vlan_ops->ena_rx_filtering(vsi);
341 	if (err)
342 		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
343 			 vf->vf_id, vsi->idx, err);
344 
345 	return 0;
346 }
347 
/**
 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
 * @vf: VF to re-apply the configuration for
 *
 * Called after a VF VSI has been re-added/rebuild during reset. The PF driver
 * needs to re-apply the host configured Tx rate limiting configuration.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int err;

	if (WARN_ON(!vsi))
		return -EINVAL;

	/* rates are stored in Mbps; the BW-limit API takes Kbps, hence
	 * the * 1000. A rate of zero means "no limit configured".
	 */
	if (vf->min_tx_rate) {
		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
				vf->min_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	if (vf->max_tx_rate) {
		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
				vf->max_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	return 0;
}
384 
/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 *
 * Mirror vf->trusted into the PRIVILEGE capability bit so that the trust
 * state configured by the host survives a VF reset.
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
}
393 
394 /**
395  * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
396  * @vf: VF to add MAC filters for
397  *
398  * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
399  * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
400  */
ice_vf_rebuild_host_mac_cfg(struct ice_vf * vf)401 static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
402 {
403 	struct device *dev = ice_pf_to_dev(vf->pf);
404 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
405 	u8 broadcast[ETH_ALEN];
406 	int status;
407 
408 	if (WARN_ON(!vsi))
409 		return -EINVAL;
410 
411 	if (ice_is_eswitch_mode_switchdev(vf->pf))
412 		return 0;
413 
414 	eth_broadcast_addr(broadcast);
415 	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
416 	if (status) {
417 		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
418 			vf->vf_id, status);
419 		return status;
420 	}
421 
422 	vf->num_mac++;
423 
424 	if (is_valid_ether_addr(vf->hw_lan_addr)) {
425 		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
426 					  ICE_FWD_TO_VSI);
427 		if (status) {
428 			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
429 				&vf->hw_lan_addr[0], vf->vf_id,
430 				status);
431 			return status;
432 		}
433 		vf->num_mac++;
434 
435 		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
436 	}
437 
438 	return 0;
439 }
440 
441 /**
442  * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
443  * @vsi: Pointer to VSI
444  *
445  * This function moves VSI into corresponding scheduler aggregator node
446  * based on cached value of "aggregator node info" per VSI
447  */
ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi * vsi)448 static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
449 {
450 	struct ice_pf *pf = vsi->back;
451 	struct device *dev;
452 	int status;
453 
454 	if (!vsi->agg_node)
455 		return;
456 
457 	dev = ice_pf_to_dev(pf);
458 	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
459 		dev_dbg(dev,
460 			"agg_id %u already has reached max_num_vsis %u\n",
461 			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
462 		return;
463 	}
464 
465 	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
466 				     vsi->idx, vsi->tc_cfg.ena_tc);
467 	if (status)
468 		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
469 			vsi->idx, vsi->agg_node->agg_id);
470 	else
471 		vsi->agg_node->num_vsis++;
472 }
473 
/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 *
 * Re-apply every host-controlled setting (trust, default MAC filters, VLANs,
 * Tx rate limits, spoof checking and the aggregator node placement) after
 * the VF's VSI has been rebuilt. Individual failures are logged but do not
 * prevent the remaining settings from being applied.
 */
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vf_set_host_trust_cfg(vf);

	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
			vf->vf_id);

	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
			vf->vf_id);

	/* rebuild aggregator node config for main VF VSI */
	ice_vf_rebuild_aggregator_node_cfg(vsi);
}
507 
/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 *
 * Clear the per-queue Rx/Tx enabled bitmaps and the queues-enabled state
 * bit so the VF is tracked as having no active queues.
 */
static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}
519 
/**
 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 * @vf: VF to set in initialized state
 *
 * After this function the VF will be ready to receive/handle the
 * VIRTCHNL_OP_GET_VF_RESOURCES message
 */
static void ice_vf_set_initialized(struct ice_vf *vf)
{
	/* start from a clean queue and promiscuous state */
	ice_set_vf_state_qs_dis(vf);
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	/* INIT is set last, after the other state bits are cleared */
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
	/* drop any VLAN v2 capabilities cached from before the reset */
	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
}
536 
/**
 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
 * @vf: the VF being reset
 *
 * Perform reset tasks which must occur after the VSI has been re-created or
 * rebuilt during a VF reset: restore the host configuration, mark the VF
 * initialized, then run the VF-specific post-rebuild hook.
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
	ice_vf_rebuild_host_cfg(vf);
	ice_vf_set_initialized(vf);

	vf->vf_ops->post_vsi_rebuild(vf);
}
551 
552 /**
553  * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
554  * are in unicast promiscuous mode
555  * @pf: PF structure for accessing VF(s)
556  *
557  * Return false if no VF(s) are in unicast promiscuous mode,
558  * else return true
559  */
ice_is_any_vf_in_unicast_promisc(struct ice_pf * pf)560 bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
561 {
562 	bool is_vf_promisc = false;
563 	struct ice_vf *vf;
564 	unsigned int bkt;
565 
566 	rcu_read_lock();
567 	ice_for_each_vf_rcu(pf, bkt, vf) {
568 		/* found a VF that has promiscuous mode configured */
569 		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
570 			is_vf_promisc = true;
571 			break;
572 		}
573 	}
574 	rcu_read_unlock();
575 
576 	return is_vf_promisc;
577 }
578 
579 /**
580  * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
581  * @vf: the VF pointer
582  * @vsi: the VSI to configure
583  * @ucast_m: promiscuous mask to apply to unicast
584  * @mcast_m: promiscuous mask to apply to multicast
585  *
586  * Decide which mask should be used for unicast and multicast filter,
587  * based on presence of VLANs
588  */
589 void
ice_vf_get_promisc_masks(struct ice_vf * vf,struct ice_vsi * vsi,u8 * ucast_m,u8 * mcast_m)590 ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
591 			 u8 *ucast_m, u8 *mcast_m)
592 {
593 	if (ice_vf_is_port_vlan_ena(vf) ||
594 	    ice_vsi_has_non_zero_vlans(vsi)) {
595 		*mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
596 		*ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
597 	} else {
598 		*mcast_m = ICE_MCAST_PROMISC_BITS;
599 		*ucast_m = ICE_UCAST_PROMISC_BITS;
600 	}
601 }
602 
603 /**
604  * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
605  * @vf: the VF pointer
606  * @vsi: the VSI to configure
607  *
608  * Clear all promiscuous/allmulticast filters for a VF
609  */
610 static int
ice_vf_clear_all_promisc_modes(struct ice_vf * vf,struct ice_vsi * vsi)611 ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
612 {
613 	struct ice_pf *pf = vf->pf;
614 	u8 ucast_m, mcast_m;
615 	int ret = 0;
616 
617 	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
618 	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
619 		if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
620 			if (ice_is_dflt_vsi_in_use(vsi->port_info))
621 				ret = ice_clear_dflt_vsi(vsi);
622 		} else {
623 			ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
624 		}
625 
626 		if (ret) {
627 			dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
628 		} else {
629 			clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
630 			dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
631 		}
632 	}
633 
634 	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
635 		ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
636 		if (ret) {
637 			dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
638 		} else {
639 			clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
640 			dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
641 		}
642 	}
643 	return ret;
644 }
645 
646 /**
647  * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
648  * @vf: the VF to configure
649  * @vsi: the VF's VSI
650  * @promisc_m: the promiscuous mode to enable
651  */
652 int
ice_vf_set_vsi_promisc(struct ice_vf * vf,struct ice_vsi * vsi,u8 promisc_m)653 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
654 {
655 	struct ice_hw *hw = &vsi->back->hw;
656 	int status;
657 
658 	if (ice_vf_is_port_vlan_ena(vf))
659 		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
660 						  ice_vf_get_port_vlan_id(vf));
661 	else if (ice_vsi_has_non_zero_vlans(vsi))
662 		status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
663 	else
664 		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
665 
666 	if (status && status != -EEXIST) {
667 		dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
668 			vf->vf_id, status);
669 		return status;
670 	}
671 
672 	return 0;
673 }
674 
675 /**
676  * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
677  * @vf: the VF to configure
678  * @vsi: the VF's VSI
679  * @promisc_m: the promiscuous mode to disable
680  */
681 int
ice_vf_clear_vsi_promisc(struct ice_vf * vf,struct ice_vsi * vsi,u8 promisc_m)682 ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
683 {
684 	struct ice_hw *hw = &vsi->back->hw;
685 	int status;
686 
687 	if (ice_vf_is_port_vlan_ena(vf))
688 		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
689 						    ice_vf_get_port_vlan_id(vf));
690 	else if (ice_vsi_has_non_zero_vlans(vsi))
691 		status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
692 	else
693 		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
694 
695 	if (status && status != -ENOENT) {
696 		dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
697 			vf->vf_id, status);
698 		return status;
699 	}
700 
701 	return 0;
702 }
703 
704 /**
705  * ice_reset_vf_mbx_cnt - reset VF mailbox message count
706  * @vf: pointer to the VF structure
707  *
708  * This function clears the VF mailbox message count, and should be called on
709  * VF reset.
710  */
ice_reset_vf_mbx_cnt(struct ice_vf * vf)711 static void ice_reset_vf_mbx_cnt(struct ice_vf *vf)
712 {
713 	struct ice_pf *pf = vf->pf;
714 
715 	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
716 		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
717 	else
718 		ice_mbx_clear_malvf(&vf->mbx_info);
719 }
720 
/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 *
 * Reset all VFs at once, in response to a PF or other device reset.
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 *
 * Takes pf->vfs.table_lock and each VF's cfg_lock; must not be called with
 * either already held.
 */
void ice_reset_all_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);

	/* clear all malicious info if the VFs are getting reset */
	ice_for_each_vf(pf, bkt, vf)
		ice_reset_vf_mbx_cnt(vf);

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
		mutex_unlock(&pf->vfs.table_lock);
		return;
	}

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, bkt, vf)
		ice_trigger_vf_reset(vf, true, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Now that we've triggered all of the VFs, iterate
	 * the table again and wait for each VF to complete.
	 */
	ice_for_each_vf(pf, bkt, vf) {
		if (!vf->vf_ops->poll_reset_status(vf)) {
			/* Display a warning if at least one VF didn't manage
			 * to reset in time, but continue on with the
			 * operation.
			 */
			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
			break;
		}
	}

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		ice_vf_fdir_init(vf);
		/* clean VF control VSI when resetting VFs since it should be
		 * setup only when VF creates its first FDIR rule.
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);

		mutex_unlock(&vf->cfg_lock);
	}

	/* rebuild the eswitch after all VF VSIs are back, when in
	 * switchdev mode; a failure here is only warned about
	 */
	if (ice_is_eswitch_mode_switchdev(pf))
		if (ice_eswitch_rebuild(pf))
			dev_warn(dev, "eswitch rebuild failed\n");

	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	mutex_unlock(&pf->vfs.table_lock);
}
805 
806 /**
807  * ice_notify_vf_reset - Notify VF of a reset event
808  * @vf: pointer to the VF structure
809  */
ice_notify_vf_reset(struct ice_vf * vf)810 static void ice_notify_vf_reset(struct ice_vf *vf)
811 {
812 	struct ice_hw *hw = &vf->pf->hw;
813 	struct virtchnl_pf_event pfe;
814 
815 	/* Bail out if VF is in disabled state, neither initialized, nor active
816 	 * state - otherwise proceed with notifications
817 	 */
818 	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
819 	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
820 	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
821 		return;
822 
823 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
824 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
825 	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
826 			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
827 			      NULL);
828 }
829 
/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @flags: flags controlling behavior of the reset
 *
 * Flags:
 *   ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
 *   ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
 *   ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
 *
 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
 * the VF resets successfully. Returns an error code if the VF fails to
 * rebuild.
 */
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
	struct ice_pf *pf = vf->pf;
	struct ice_lag *lag;
	struct ice_vsi *vsi;
	u8 act_prt, pri_prt;
	struct device *dev;
	int err = 0;
	bool rsd;

	dev = ice_pf_to_dev(pf);
	act_prt = ICE_LAG_INVALID_PORT;
	pri_prt = pf->hw.port_info->lport;

	if (flags & ICE_VF_RESET_NOTIFY)
		ice_notify_vf_reset(vf);

	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return 0;
	}

	/* either take cfg_lock here, or require the caller to hold it */
	if (flags & ICE_VF_RESET_LOCK)
		mutex_lock(&vf->cfg_lock);
	else
		lockdep_assert_held(&vf->cfg_lock);

	/* On a bonded primary interface with a different active port, move
	 * the VF's nodes to the primary port for the duration of the reset
	 * (undone at out_unlock). NOTE(review): relies on
	 * ice_lag_move_vf_nodes_cfg() semantics — confirm against ice_lag.c.
	 */
	lag = pf->lag;
	mutex_lock(&pf->lag_mutex);
	if (lag && lag->bonded && lag->primary) {
		act_prt = lag->active_port;
		if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
		    lag->upper_netdev)
			ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
		else
			act_prt = ICE_LAG_INVALID_PORT;
	}

	if (ice_is_vf_disabled(vf)) {
		vsi = ice_get_vf_vsi(vf);
		if (!vsi) {
			dev_dbg(dev, "VF is already removed\n");
			err = -EINVAL;
			goto out_unlock;
		}
		/* make sure the disabled VF's rings are stopped, then bail
		 * out successfully — something else owns the reset
		 */
		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);

		if (ice_vsi_is_rx_queue_active(vsi))
			ice_vsi_stop_all_rx_rings(vsi);

		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		goto out_unlock;
	}

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);

	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EIO;
		goto out_unlock;
	}

	ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	rsd = vf->vf_ops->poll_reset_status(vf);

	/* Display a warning if VF didn't manage to reset in time, but need to
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	/* capabilities must be renegotiated by the VF after reset */
	vf->driver_caps = 0;
	ice_vc_set_default_allowlist(vf);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	ice_vf_clear_all_promisc_modes(vf, vsi);

	ice_vf_fdir_exit(vf);
	ice_vf_fdir_init(vf);
	/* clean VF control VSI when resetting VF since it should be setup
	 * only when VF creates its first FDIR rule.
	 */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	ice_vf_pre_vsi_rebuild(vf);

	if (ice_vf_recreate_vsi(vf)) {
		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
			vf->vf_id);
		err = -EFAULT;
		goto out_unlock;
	}

	ice_vf_post_vsi_rebuild(vf);
	/* re-fetch the VSI: it was released and re-created above */
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EINVAL;
		goto out_unlock;
	}

	ice_eswitch_update_repr(vsi);

	/* if the VF has been reset allow it to come up again */
	ice_reset_vf_mbx_cnt(vf);

out_unlock:
	/* restore the LAG node placement changed before the reset */
	if (lag && lag->bonded && lag->primary &&
	    act_prt != ICE_LAG_INVALID_PORT)
		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
	mutex_unlock(&pf->lag_mutex);

	if (flags & ICE_VF_RESET_LOCK)
		mutex_unlock(&vf->cfg_lock);

	return err;
}
977 
/**
 * ice_set_vf_state_dis - Set VF state to disabled
 * @vf: pointer to the VF structure
 *
 * Mark all of the VF's queues as disabled, then clear any pending reset
 * state via the VF-specific op.
 */
void ice_set_vf_state_dis(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	vf->vf_ops->clear_reset_state(vf);
}
987 
988 /* Private functions only accessed from other virtualization files */
989 
990 /**
991  * ice_initialize_vf_entry - Initialize a VF entry
992  * @vf: pointer to the VF structure
993  */
ice_initialize_vf_entry(struct ice_vf * vf)994 void ice_initialize_vf_entry(struct ice_vf *vf)
995 {
996 	struct ice_pf *pf = vf->pf;
997 	struct ice_vfs *vfs;
998 
999 	vfs = &pf->vfs;
1000 
1001 	/* assign default capabilities */
1002 	vf->spoofchk = true;
1003 	vf->num_vf_qs = vfs->num_qps_per;
1004 	ice_vc_set_default_allowlist(vf);
1005 	ice_virtchnl_set_dflt_ops(vf);
1006 
1007 	/* ctrl_vsi_idx will be set to a valid value only when iAVF
1008 	 * creates its first fdir rule.
1009 	 */
1010 	ice_vf_ctrl_invalidate_vsi(vf);
1011 	ice_vf_fdir_init(vf);
1012 
1013 	/* Initialize mailbox info for this VF */
1014 	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
1015 		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
1016 	else
1017 		ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
1018 
1019 	mutex_init(&vf->cfg_lock);
1020 }
1021 
/**
 * ice_deinitialize_vf_entry - Tear down state set up by ice_initialize_vf_entry
 * @vf: pointer to the VF structure
 *
 * When the ICE_F_MBX_LIMIT feature is not supported, the VF's mbx_info was
 * registered on a list by ice_mbx_init_vf_info() and must be unlinked here.
 */
void ice_deinitialize_vf_entry(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
		list_del(&vf->mbx_info.list_entry);
}
1029 
1030 /**
1031  * ice_dis_vf_qs - Disable the VF queues
1032  * @vf: pointer to the VF structure
1033  */
ice_dis_vf_qs(struct ice_vf * vf)1034 void ice_dis_vf_qs(struct ice_vf *vf)
1035 {
1036 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1037 
1038 	if (WARN_ON(!vsi))
1039 		return;
1040 
1041 	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
1042 	ice_vsi_stop_all_rx_rings(vsi);
1043 	ice_set_vf_state_qs_dis(vf);
1044 }
1045 
1046 /**
1047  * ice_err_to_virt_err - translate errors for VF return code
1048  * @err: error return code
1049  */
ice_err_to_virt_err(int err)1050 enum virtchnl_status_code ice_err_to_virt_err(int err)
1051 {
1052 	switch (err) {
1053 	case 0:
1054 		return VIRTCHNL_STATUS_SUCCESS;
1055 	case -EINVAL:
1056 	case -ENODEV:
1057 		return VIRTCHNL_STATUS_ERR_PARAM;
1058 	case -ENOMEM:
1059 		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
1060 	case -EALREADY:
1061 	case -EBUSY:
1062 	case -EIO:
1063 	case -ENOSPC:
1064 		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1065 	default:
1066 		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
1067 	}
1068 }
1069 
1070 /**
1071  * ice_check_vf_init - helper to check if VF init complete
1072  * @vf: the pointer to the VF to check
1073  */
ice_check_vf_init(struct ice_vf * vf)1074 int ice_check_vf_init(struct ice_vf *vf)
1075 {
1076 	struct ice_pf *pf = vf->pf;
1077 
1078 	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1079 		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
1080 			vf->vf_id);
1081 		return -EBUSY;
1082 	}
1083 	return 0;
1084 }
1085 
1086 /**
1087  * ice_vf_get_port_info - Get the VF's port info structure
1088  * @vf: VF used to get the port info structure for
1089  */
ice_vf_get_port_info(struct ice_vf * vf)1090 struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
1091 {
1092 	return vf->pf->hw.port_info;
1093 }
1094 
1095 /**
1096  * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
1097  * @vsi: the VSI to configure
1098  * @enable: whether to enable or disable the spoof checking
1099  *
1100  * Configure a VSI to enable (or disable) spoof checking behavior.
1101  */
ice_cfg_mac_antispoof(struct ice_vsi * vsi,bool enable)1102 static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
1103 {
1104 	struct ice_vsi_ctx *ctx;
1105 	int err;
1106 
1107 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1108 	if (!ctx)
1109 		return -ENOMEM;
1110 
1111 	ctx->info.sec_flags = vsi->info.sec_flags;
1112 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1113 
1114 	if (enable)
1115 		ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1116 	else
1117 		ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1118 
1119 	err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
1120 	if (err)
1121 		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
1122 			enable ? "ON" : "OFF", vsi->vsi_num, err);
1123 	else
1124 		vsi->info.sec_flags = ctx->info.sec_flags;
1125 
1126 	kfree(ctx);
1127 
1128 	return err;
1129 }
1130 
1131 /**
1132  * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
1133  * @vsi: VSI to enable Tx spoof checking for
1134  */
ice_vsi_ena_spoofchk(struct ice_vsi * vsi)1135 static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
1136 {
1137 	struct ice_vsi_vlan_ops *vlan_ops;
1138 	int err = 0;
1139 
1140 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1141 
1142 	/* Allow VF with VLAN 0 only to send all tagged traffic */
1143 	if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
1144 		err = vlan_ops->ena_tx_filtering(vsi);
1145 		if (err)
1146 			return err;
1147 	}
1148 
1149 	return ice_cfg_mac_antispoof(vsi, true);
1150 }
1151 
1152 /**
1153  * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
1154  * @vsi: VSI to disable Tx spoof checking for
1155  */
ice_vsi_dis_spoofchk(struct ice_vsi * vsi)1156 static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
1157 {
1158 	struct ice_vsi_vlan_ops *vlan_ops;
1159 	int err;
1160 
1161 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1162 
1163 	err = vlan_ops->dis_tx_filtering(vsi);
1164 	if (err)
1165 		return err;
1166 
1167 	return ice_cfg_mac_antispoof(vsi, false);
1168 }
1169 
1170 /**
1171  * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
1172  * @vsi: VSI associated to the VF
1173  * @enable: whether to enable or disable the spoof checking
1174  */
ice_vsi_apply_spoofchk(struct ice_vsi * vsi,bool enable)1175 int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
1176 {
1177 	int err;
1178 
1179 	if (enable)
1180 		err = ice_vsi_ena_spoofchk(vsi);
1181 	else
1182 		err = ice_vsi_dis_spoofchk(vsi);
1183 
1184 	return err;
1185 }
1186 
1187 /**
1188  * ice_is_vf_trusted
1189  * @vf: pointer to the VF info
1190  */
ice_is_vf_trusted(struct ice_vf * vf)1191 bool ice_is_vf_trusted(struct ice_vf *vf)
1192 {
1193 	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1194 }
1195 
1196 /**
1197  * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
1198  * @vf: the VF to check
1199  *
1200  * Returns true if the VF has no Rx and no Tx queues enabled and returns false
1201  * otherwise
1202  */
ice_vf_has_no_qs_ena(struct ice_vf * vf)1203 bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
1204 {
1205 	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
1206 		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
1207 }
1208 
1209 /**
1210  * ice_is_vf_link_up - check if the VF's link is up
1211  * @vf: VF to check if link is up
1212  */
ice_is_vf_link_up(struct ice_vf * vf)1213 bool ice_is_vf_link_up(struct ice_vf *vf)
1214 {
1215 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
1216 
1217 	if (ice_check_vf_init(vf))
1218 		return false;
1219 
1220 	if (ice_vf_has_no_qs_ena(vf))
1221 		return false;
1222 	else if (vf->link_forced)
1223 		return vf->link_up;
1224 	else
1225 		return pi->phy.link_info.link_info &
1226 			ICE_AQ_LINK_UP;
1227 }
1228 
1229 /**
1230  * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
1231  * @vf: VF that control VSI is being invalidated on
1232  */
ice_vf_ctrl_invalidate_vsi(struct ice_vf * vf)1233 void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
1234 {
1235 	vf->ctrl_vsi_idx = ICE_NO_VSI;
1236 }
1237 
1238 /**
1239  * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
1240  * @vf: VF that control VSI is being released on
1241  */
ice_vf_ctrl_vsi_release(struct ice_vf * vf)1242 void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
1243 {
1244 	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
1245 	ice_vf_ctrl_invalidate_vsi(vf);
1246 }
1247 
1248 /**
1249  * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
1250  * @vf: VF to setup control VSI for
1251  *
1252  * Returns pointer to the successfully allocated VSI struct on success,
1253  * otherwise returns NULL on failure.
1254  */
ice_vf_ctrl_vsi_setup(struct ice_vf * vf)1255 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
1256 {
1257 	struct ice_vsi_cfg_params params = {};
1258 	struct ice_pf *pf = vf->pf;
1259 	struct ice_vsi *vsi;
1260 
1261 	params.type = ICE_VSI_CTRL;
1262 	params.pi = ice_vf_get_port_info(vf);
1263 	params.vf = vf;
1264 	params.flags = ICE_VSI_FLAG_INIT;
1265 
1266 	vsi = ice_vsi_setup(pf, &params);
1267 	if (!vsi) {
1268 		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
1269 		ice_vf_ctrl_invalidate_vsi(vf);
1270 	}
1271 
1272 	return vsi;
1273 }
1274 
1275 /**
1276  * ice_vf_init_host_cfg - Initialize host admin configuration
1277  * @vf: VF to initialize
1278  * @vsi: the VSI created at initialization
1279  *
1280  * Initialize the VF host configuration. Called during VF creation to setup
1281  * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It
1282  * should only be called during VF creation.
1283  */
ice_vf_init_host_cfg(struct ice_vf * vf,struct ice_vsi * vsi)1284 int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
1285 {
1286 	struct ice_vsi_vlan_ops *vlan_ops;
1287 	struct ice_pf *pf = vf->pf;
1288 	u8 broadcast[ETH_ALEN];
1289 	struct device *dev;
1290 	int err;
1291 
1292 	dev = ice_pf_to_dev(pf);
1293 
1294 	err = ice_vsi_add_vlan_zero(vsi);
1295 	if (err) {
1296 		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1297 			 vf->vf_id);
1298 		return err;
1299 	}
1300 
1301 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1302 	err = vlan_ops->ena_rx_filtering(vsi);
1303 	if (err) {
1304 		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
1305 			 vf->vf_id);
1306 		return err;
1307 	}
1308 
1309 	eth_broadcast_addr(broadcast);
1310 	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1311 	if (err) {
1312 		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
1313 			vf->vf_id, err);
1314 		return err;
1315 	}
1316 
1317 	vf->num_mac = 1;
1318 
1319 	err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
1320 	if (err) {
1321 		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
1322 			 vf->vf_id);
1323 		return err;
1324 	}
1325 
1326 	return 0;
1327 }
1328 
1329 /**
1330  * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
1331  * @vf: VF to remove access to VSI for
1332  */
ice_vf_invalidate_vsi(struct ice_vf * vf)1333 void ice_vf_invalidate_vsi(struct ice_vf *vf)
1334 {
1335 	vf->lan_vsi_idx = ICE_NO_VSI;
1336 	vf->lan_vsi_num = ICE_NO_VSI;
1337 }
1338 
1339 /**
1340  * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
1341  * @vf: pointer to the VF structure
1342  *
1343  * Release the VF associated with this VSI and then invalidate the VSI
1344  * indexes.
1345  */
ice_vf_vsi_release(struct ice_vf * vf)1346 void ice_vf_vsi_release(struct ice_vf *vf)
1347 {
1348 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1349 
1350 	if (WARN_ON(!vsi))
1351 		return;
1352 
1353 	ice_vsi_release(vsi);
1354 	ice_vf_invalidate_vsi(vf);
1355 }
1356 
1357 /**
1358  * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
1359  * @pf: the PF private structure
1360  * @vsi: pointer to the VSI
1361  *
1362  * Return first found VF control VSI other than the vsi
1363  * passed by parameter. This function is used to determine
1364  * whether new resources have to be allocated for control VSI
1365  * or they can be shared with existing one.
1366  *
1367  * Return found VF control VSI pointer other itself. Return
1368  * NULL Otherwise.
1369  *
1370  */
ice_get_vf_ctrl_vsi(struct ice_pf * pf,struct ice_vsi * vsi)1371 struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
1372 {
1373 	struct ice_vsi *ctrl_vsi = NULL;
1374 	struct ice_vf *vf;
1375 	unsigned int bkt;
1376 
1377 	rcu_read_lock();
1378 	ice_for_each_vf_rcu(pf, bkt, vf) {
1379 		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
1380 			ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1381 			break;
1382 		}
1383 	}
1384 
1385 	rcu_read_unlock();
1386 	return ctrl_vsi;
1387 }
1388