// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_eswitch.h"
#include "ice_fltr.h"
#include "ice_repr.h"
#include "ice_devlink.h"
#include "ice_tc_lib.h"

/**
 * ice_eswitch_setup_env - configure switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function adds the HW filter configuration specific to switchdev
 * mode.
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_port_info *pi = pf->hw.port_info;
	bool rule_added = false;

	ice_vsi_manage_vlan_stripping(ctrl_vsi, false);

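	/* Drop the uplink VSI's existing filters; the default MAC/broadcast
	 * filter is restored in the error path below and on teardown.
	 */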
	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

	if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI))
		goto err_def_rx;

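	/* Make the uplink VSI the default (Rx) VSI only if no other VSI
	 * already holds that role; track whether the rule was added here
	 * so teardown only clears what this function set up.
	 */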
	if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
		if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
			goto err_def_rx;
		rule_added = true;
	}

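	/* Install the Tx default rule for the switchdev control VSI */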
	if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX))
		goto err_def_tx;

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

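	/* Set the loopback-enable action on the control VSI's Tx default
	 * rule so frames sent from port representors can be switched back
	 * to VF VSIs.
	 */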
	if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id,
					    ICE_FLTR_TX,
					    ICE_SINGLE_ACT_LB_ENABLE))
		goto err_update_action;

	return 0;

err_update_action:
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
err_def_tx:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi->vsw);
err_def_rx:
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	return -ENODEV;
}

/**
 * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
 * @pf: pointer to PF struct
 *
 * In switchdev mode the number of allocated Tx and Rx rings is equal.
 *
 * This function fills the q_vector structures associated with each representor
 * and moves each ring pair to its port representor netdev. Every port
 * representor gets one dedicated Tx/Rx ring pair, so the number of ring pairs
 * equals the number of VFs.
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->switchdev.control_vsi;
	int q_id;

	ice_for_each_txq(vsi, q_id) {
		struct ice_repr *repr = pf->vf[q_id].repr;
		struct ice_q_vector *q_vector = repr->q_vector;
		struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
		struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];

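		/* Every representor q_vector is backed by the control VSI
		 * and reuses its first vector's HW register index.
		 */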
		q_vector->vsi = vsi;
		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;

		q_vector->num_ring_tx = 1;
		q_vector->tx.tx_ring = tx_ring;
		tx_ring->q_vector = q_vector;
		tx_ring->next = NULL;
		tx_ring->netdev = repr->netdev;
		/* In switchdev mode, from the OS stack's perspective, there is
		 * only one queue for a given netdev, so it needs to be indexed
		 * as 0.
		 */
		tx_ring->q_index = 0;

		q_vector->num_ring_rx = 1;
		q_vector->rx.rx_ring = rx_ring;
		rx_ring->q_vector = q_vector;
		rx_ring->next = NULL;
		rx_ring->netdev = repr->netdev;
	}
}

/**
 * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
 * @pf: pointer to PF struct
 */
static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int max_vsi_num = 0;
	int i;

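	/* For each VF: drop the VSI's default filters, attach a HW port
	 * metadata dst, clear antispoof and add a VLAN 0 rule. On failure,
	 * restore the VF's MAC/broadcast filter and unwind what was done
	 * for the already-configured VFs.
	 */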
	ice_for_each_vf(pf, i) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
						   GFP_KERNEL);
		if (!vf->repr->dst) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			goto err;
		}

		if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
			goto err;
		}

		if (max_vsi_num < vsi->vsi_num)
			max_vsi_num = vsi->vsi_num;

		netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

		netif_keep_dst(vf->repr->netdev);
	}

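	/* (Re)allocate the VSI number -> representor netdev lookup table,
	 * sized by the highest VF VSI number seen above.
	 */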
	kfree(ctrl_vsi->target_netdevs);

	ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1,
					   sizeof(*ctrl_vsi->target_netdevs),
					   GFP_KERNEL);
	if (!ctrl_vsi->target_netdevs)
		goto err;

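	/* Point each source VSI's table entry and metadata dst at its
	 * representor netdev, and make the control VSI the representor's
	 * traffic VSI.
	 */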
	ice_for_each_vf(pf, i) {
		struct ice_repr *repr = pf->vf[i].repr;
		struct ice_vsi *vsi = repr->src_vsi;
		struct metadata_dst *dst;

		ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev;

		dst = repr->dst;
		dst->u.port_info.port_id = vsi->vsi_num;
		dst->u.port_info.lower_dev = repr->netdev;
		ice_repr_set_traffic_vsi(repr, ctrl_vsi);
	}

	return 0;

err:
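	/* Unwind only the VFs that were fully configured before the failure;
	 * the failing iteration cleaned up after itself before jumping here.
	 */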
	for (i = i - 1; i >= 0; i--) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
					       ICE_FWD_TO_VSI);
	}

	return -ENODEV;
}

/**
 * ice_eswitch_release_reprs - clear PR VSIs configuration
 * @pf: pointer to PF struct
 * @ctrl_vsi: pointer to switchdev control VSI
 */
static void
ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
	int i;

	kfree(ctrl_vsi->target_netdevs);
	ice_for_each_vf(pf, i) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
					       ICE_FWD_TO_VSI);

		netif_napi_del(&vf->repr->q_vector->napi);
	}
}

/**
 * ice_eswitch_update_repr - reconfigure VF port representor
 * @vsi: VF VSI for which port representor is configured
 */
void ice_eswitch_update_repr(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_repr *repr;
	struct ice_vf *vf;
	int ret;

	if (!ice_is_switchdev_running(pf))
		return;

	vf = &pf->vf[vsi->vf_id];
	repr = vf->repr;
	repr->src_vsi = vsi;
	repr->dst->u.port_info.port_id = vsi->vsi_num;

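	/* Clear antispoof on the (possibly rebuilt) VF VSI; if that fails,
	 * fall back to a plain MAC/broadcast forward filter.
	 */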
	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
	if (ret) {
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
	}
}

/**
 * ice_eswitch_port_start_xmit - callback for packet transmit
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	struct ice_vsi *vsi;

	np = netdev_priv(netdev);
	vsi = np->vsi;

	if (ice_is_reset_in_progress(vsi->back->state))
		return NETDEV_TX_BUSY;

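	/* Attach the representor's metadata dst so the Tx path can target
	 * the corresponding VF VSI, and map the skb onto the VF's queue.
	 */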
	repr = ice_netdev_to_repr(netdev);
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->queue_mapping = repr->vf->vf_id;

	return ice_start_xmit(skb, netdev);
}

/**
 * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
 * @skb: pointer to send buffer
 * @off: pointer to offload struct
 */
void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
			   struct ice_tx_offload_params *off)
{
	struct metadata_dst *dst = skb_metadata_dst(skb);
	u64 cd_cmd, dst_vsi;

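	/* Without a metadata dst the frame is switched to the uplink port;
	 * with one, the target VSI number from the dst is written into the
	 * context descriptor so the HW switch delivers it to that VSI.
	 */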
	if (!dst) {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
		off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
	} else {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
		dst_vsi = ((u64)dst->u.port_info.port_id <<
			   ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
		off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
	}
}

/**
 * ice_eswitch_release_env - clear switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function removes the HW filter configuration specific to switchdev
 * mode and restores the default legacy mode settings.
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

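	/* Undo ice_eswitch_setup_env() in reverse order and restore the
	 * uplink's default MAC/broadcast filter.
	 */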
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
	ice_clear_dflt_vsi(uplink_vsi->vsw);
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}

/**
 * ice_eswitch_vsi_setup - configure switchdev control VSI
 * @pf: pointer to PF structure
 * @pi: pointer to port_info structure
 */
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID);
}

/**
 * ice_eswitch_napi_del - remove NAPI handle for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_del(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		netif_napi_del(&pf->vf[i].repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_enable - enable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		napi_enable(&pf->vf[i].repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_disable - disable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		napi_disable(&pf->vf[i].repr->q_vector->napi);
}

/**
 * ice_eswitch_set_rxdid - configure rxdid on all Rx queues of the VSI
 * @vsi: VSI to set up rxdid on
 * @rxdid: flex descriptor id
 */
static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid)
{
	struct ice_hw *hw = &vsi->back->hw;
	int i;

	ice_for_each_rxq(vsi, i) {
		struct ice_rx_ring *ring = vsi->rx_rings[i];
		u16 pf_q = vsi->rxq_map[ring->q_index];

		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
	}
}

/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi;

	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->switchdev.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->switchdev.control_vsi;
	pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
	if (!pf->switchdev.uplink_vsi)
		goto err_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_repr_add_for_all_vfs(pf))
		goto err_repr_add;

	if (ice_eswitch_setup_reprs(pf))
		goto err_setup_reprs;

	ice_eswitch_remap_rings_to_vectors(pf);

	if (ice_vsi_open(ctrl_vsi))
		goto err_setup_reprs;

	ice_eswitch_napi_enable(pf);

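	/* Switch the control VSI's Rx queues to the flex descriptor format
	 * that reports the source VSI, used to demux packets to the right
	 * port representor.
	 */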
	ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);

	return 0;

err_setup_reprs:
	ice_repr_rem_from_all_vfs(pf);
err_repr_add:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}

/**
 * ice_eswitch_disable_switchdev - disable switchdev resources
 * @pf: pointer to PF structure
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_release_env(pf);
	ice_eswitch_release_reprs(pf, ctrl_vsi);
	ice_vsi_release(ctrl_vsi);
	ice_repr_rem_from_all_vfs(pf);
}

/**
 * ice_eswitch_mode_set - set new eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: eswitch mode to switch to
 * @extack: pointer to extack structure
 */
int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
		     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (pf->eswitch_mode == mode)
		return 0;

	if (pf->num_alloc_vfs) {
		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there are no VFs created");
		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there are no VFs created");
		return -EOPNOTSUPP;
	}

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
	{
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
		break;
	}
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
		return -EINVAL;
	}

	pf->eswitch_mode = mode;
	return 0;
}

/**
 * ice_eswitch_get_target_netdev - return port representor netdev
 * @rx_ring: pointer to Rx ring
 * @rx_desc: pointer to Rx descriptor
 *
 * When working in switchdev mode (i.e. when the Rx ring belongs to the
 * control VSI), this function returns the netdev of the appropriate port
 * representor. Otherwise, the regular netdev associated with the Rx ring is
 * returned.
 */
struct net_device *
ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
			      union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_32b_rx_flex_desc_nic_2 *desc;
	struct ice_vsi *vsi = rx_ring->vsi;
	struct ice_vsi *control_vsi;
	u16 target_vsi_id;

	control_vsi = vsi->back->switchdev.control_vsi;
	if (vsi != control_vsi)
		return rx_ring->netdev;

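	/* On the control VSI, look up the representor netdev by the source
	 * VSI number reported in the flex NIC 2 Rx descriptor.
	 */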
	desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
	target_vsi_id = le16_to_cpu(desc->src_vsi);

	return vsi->target_netdevs[target_vsi_id];
}

/**
 * ice_eswitch_mode_get - get current eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: output parameter for current eswitch mode
 */
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct ice_pf *pf = devlink_priv(devlink);

	*mode = pf->eswitch_mode;
	return 0;
}

/**
 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
 * false otherwise.
 */
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
{
	return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

/**
 * ice_eswitch_release - cleanup eswitch
 * @pf: pointer to PF structure
 */
void ice_eswitch_release(struct ice_pf *pf)
{
	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return;

	ice_eswitch_disable_switchdev(pf);
	pf->switchdev.is_running = false;
}

/**
 * ice_eswitch_configure - configure eswitch
 * @pf: pointer to PF structure
 */
int ice_eswitch_configure(struct ice_pf *pf)
{
	int status;

	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
		return 0;

	status = ice_eswitch_enable_switchdev(pf);
	if (status)
		return status;

	pf->switchdev.is_running = true;
	return 0;
}

/**
 * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
	struct ice_repr *repr;
	int i;

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, i) {
		repr = pf->vf[i].repr;
		if (repr)
			ice_repr_start_tx_queues(repr);
	}
}

/**
 * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
 * @pf: pointer to PF structure
 */
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
	struct ice_repr *repr;
	int i;

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, i) {
		repr = pf->vf[i].repr;
		if (repr)
			ice_repr_stop_tx_queues(repr);
	}
}

/**
 * ice_eswitch_rebuild - rebuild eswitch
 * @pf: pointer to PF structure
 */
int ice_eswitch_rebuild(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int status;

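	/* Drop the stale representor NAPI instances, then re-run the
	 * switchdev environment and representor setup steps.
	 */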
	ice_eswitch_napi_disable(pf);
	ice_eswitch_napi_del(pf);

	status = ice_eswitch_setup_env(pf);
	if (status)
		return status;

	status = ice_eswitch_setup_reprs(pf);
	if (status)
		return status;

	ice_eswitch_remap_rings_to_vectors(pf);

	ice_replay_tc_fltrs(pf);

	status = ice_vsi_open(ctrl_vsi);
	if (status)
		return status;

	ice_eswitch_napi_enable(pf);
	ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
	ice_eswitch_start_all_tx_queues(pf);

	return 0;
}