// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_eswitch.h"
#include "ice_fltr.h"
#include "ice_repr.h"
#include "ice_devlink.h"
#include "ice_tc_lib.h"

/**
 * ice_eswitch_add_vf_mac_rule - add adv rule with VF's MAC
 * @pf: pointer to PF struct
 * @vf: pointer to VF struct
 * @mac: VF's MAC address
 *
 * This function adds an advanced rule that forwards packets with the
 * VF's MAC address (src MAC) to the corresponding switchdev ctrl VSI queue.
 */
int
ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_adv_lkup_elem *list;
	struct ice_hw *hw = &pf->hw;
	const u16 lkups_cnt = 1;
	int err;

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list[0].type = ICE_MAC_OFOS;
	ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
	eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);

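	/* Forward packets matching the VF's source MAC to the control VSI
	 * Rx queue dedicated to this VF's port representor.
	 */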
	rule_info.sw_act.flag |= ICE_FLTR_TX;
	rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
	rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
	rule_info.rx = false;
	rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
				       ctrl_vsi->rxq_map[vf->vf_id];
	rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
	rule_info.flags_info.act_valid = true;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
			       vf->repr->mac_rule);
	if (err)
		dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d",
			vf->vf_id);
	else
		vf->repr->rule_added = true;

	kfree(list);
	return err;
}

/**
 * ice_eswitch_replay_vf_mac_rule - replay adv rule with VF's MAC
 * @vf: pointer to VF struct
 *
 * This function replays VF's MAC rule after reset.
 */
void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
{
	int err;

	if (!ice_is_switchdev_running(vf->pf))
		return;

	if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
		err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
						  vf->hw_lan_addr.addr);
		if (err) {
			dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d, error %d\n",
				vf->hw_lan_addr.addr, vf->vf_id, err);
			return;
		}
		vf->num_mac++;

		ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
	}
}

/**
 * ice_eswitch_del_vf_mac_rule - delete adv rule with VF's MAC
 * @vf: pointer to the VF struct
 *
 * Delete the advanced rule that was used to forward packets with the VF's MAC
 * address (src MAC) to the corresponding switchdev ctrl VSI queue.
 */
void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf)
{
	if (!ice_is_switchdev_running(vf->pf))
		return;

	if (!vf->repr->rule_added)
		return;

	ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule);
	vf->repr->rule_added = false;
}

/**
 * ice_eswitch_setup_env - configure switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function adds the HW filter configuration specific to switchdev
 * mode.
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct net_device *uplink_netdev = uplink_vsi->netdev;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	bool rule_added = false;

	ice_vsi_manage_vlan_stripping(ctrl_vsi, false);

	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

	netif_addr_lock_bh(uplink_netdev);
	__dev_uc_unsync(uplink_netdev, NULL);
	__dev_mc_unsync(uplink_netdev, NULL);
	netif_addr_unlock_bh(uplink_netdev);

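	/* Allow untagged (VLAN 0) traffic on the uplink and make it the
	 * default VSI so it receives traffic not matched by other filters.
	 */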
	if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI))
		goto err_def_rx;

	if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
		if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
			goto err_def_rx;
		rule_added = true;
	}

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

	return 0;

err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi->vsw);
err_def_rx:
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	return -ENODEV;
}

/**
 * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
 * @pf: pointer to PF struct
 *
 * In switchdev mode, the number of allocated Tx and Rx rings is equal.
 *
 * This function fills the q_vector structures associated with each representor
 * and moves each ring pair to the port representor netdevs. Each port
 * representor has one dedicated Tx/Rx ring pair, so the number of ring pairs
 * is equal to the number of VFs.
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->switchdev.control_vsi;
	int q_id;

	ice_for_each_txq(vsi, q_id) {
		struct ice_repr *repr = pf->vf[q_id].repr;
		struct ice_q_vector *q_vector = repr->q_vector;
		struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
		struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];

		q_vector->vsi = vsi;
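		/* Each representor q_vector reuses the register index of the
		 * control VSI's first vector.
		 */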
		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;

		q_vector->num_ring_tx = 1;
		q_vector->tx.tx_ring = tx_ring;
		tx_ring->q_vector = q_vector;
		tx_ring->next = NULL;
		tx_ring->netdev = repr->netdev;
		/* In switchdev mode, from OS stack perspective, there is only
		 * one queue for given netdev, so it needs to be indexed as 0.
		 */
		tx_ring->q_index = 0;

		q_vector->num_ring_rx = 1;
		q_vector->rx.rx_ring = rx_ring;
		rx_ring->q_vector = q_vector;
		rx_ring->next = NULL;
		rx_ring->netdev = repr->netdev;
	}
}

/**
 * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
 * @pf: pointer to PF struct
 */
static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int max_vsi_num = 0;
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
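		/* Allocate a HW port mux metadata dst; the representor Tx path
		 * attaches it to skbs so ice_eswitch_set_target_vsi() can
		 * steer them to the VF's VSI.
		 */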
		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
						   GFP_KERNEL);
		if (!vf->repr->dst) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			goto err;
		}

		if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
			goto err;
		}

		if (max_vsi_num < vsi->vsi_num)
			max_vsi_num = vsi->vsi_num;

		netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

		netif_keep_dst(vf->repr->netdev);
	}

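	/* With all VFs configured, point each representor's metadata dst at
	 * its source VSI and switch the representors' traffic VSI to the
	 * control VSI.
	 */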
	ice_for_each_vf(pf, i) {
		struct ice_repr *repr = pf->vf[i].repr;
		struct ice_vsi *vsi = repr->src_vsi;
		struct metadata_dst *dst;

		dst = repr->dst;
		dst->u.port_info.port_id = vsi->vsi_num;
		dst->u.port_info.lower_dev = repr->netdev;
		ice_repr_set_traffic_vsi(repr, ctrl_vsi);
	}

	return 0;

err:
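	/* Undo the configuration of every VF handled before the failure */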
	for (i = i - 1; i >= 0; i--) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
					       ICE_FWD_TO_VSI);
	}

	return -ENODEV;
}

/**
 * ice_eswitch_release_reprs - clear PR VSIs configuration
 * @pf: pointer to PF struct
 * @ctrl_vsi: pointer to switchdev control VSI
 */
static void
ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
					       ICE_FWD_TO_VSI);

		netif_napi_del(&vf->repr->q_vector->napi);
	}
}

/**
 * ice_eswitch_update_repr - reconfigure VF port representor
 * @vsi: VF VSI for which port representor is configured
 */
void ice_eswitch_update_repr(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_repr *repr;
	struct ice_vf *vf;
	int ret;

	if (!ice_is_switchdev_running(pf))
		return;

	vf = &pf->vf[vsi->vf_id];
	repr = vf->repr;
	repr->src_vsi = vsi;
	repr->dst->u.port_info.port_id = vsi->vsi_num;

	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
	if (ret) {
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
	}
}

/**
 * ice_eswitch_port_start_xmit - callback for packet transmit
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	struct ice_vsi *vsi;

	np = netdev_priv(netdev);
	vsi = np->vsi;

	if (ice_is_reset_in_progress(vsi->back->state))
		return NETDEV_TX_BUSY;

	repr = ice_netdev_to_repr(netdev);
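	/* Attach the representor's metadata dst so the Tx path targets the
	 * VF's VSI, and map the skb to the control VSI ring dedicated to
	 * this VF.
	 */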
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->queue_mapping = repr->vf->vf_id;

	return ice_start_xmit(skb, netdev);
}

/**
 * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
 * @skb: pointer to send buffer
 * @off: pointer to offload struct
 */
void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
			   struct ice_tx_offload_params *off)
{
	struct metadata_dst *dst = skb_metadata_dst(skb);
	u64 cd_cmd, dst_vsi;

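	/* Without a metadata dst the packet goes out the uplink; otherwise
	 * direct it to the VSI identified by the dst's port id.
	 */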
	if (!dst) {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
		off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
	} else {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
		dst_vsi = ((u64)dst->u.port_info.port_id <<
			   ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
		off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
	}
}

/**
 * ice_eswitch_release_env - clear switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function removes the HW filter configuration specific to switchdev
 * mode and restores the default legacy mode settings.
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	ice_clear_dflt_vsi(uplink_vsi->vsw);
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}

/**
 * ice_eswitch_vsi_setup - configure switchdev control VSI
 * @pf: pointer to PF structure
 * @pi: pointer to port_info structure
 */
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL);
}

/**
 * ice_eswitch_napi_del - remove NAPI handle for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_del(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		netif_napi_del(&pf->vf[i].repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_enable - enable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		napi_enable(&pf->vf[i].repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_disable - disable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		napi_disable(&pf->vf[i].repr->q_vector->napi);
}

/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi;

	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->switchdev.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->switchdev.control_vsi;
	pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
	if (!pf->switchdev.uplink_vsi)
		goto err_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_repr_add_for_all_vfs(pf))
		goto err_repr_add;

	if (ice_eswitch_setup_reprs(pf))
		goto err_setup_reprs;

	ice_eswitch_remap_rings_to_vectors(pf);

	if (ice_vsi_open(ctrl_vsi))
		goto err_setup_reprs;

	ice_eswitch_napi_enable(pf);

	return 0;

err_setup_reprs:
	ice_repr_rem_from_all_vfs(pf);
err_repr_add:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}

/**
 * ice_eswitch_disable_switchdev - disable switchdev resources
 * @pf: pointer to PF structure
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_release_env(pf);
	ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx);
	ice_eswitch_release_reprs(pf, ctrl_vsi);
	ice_vsi_release(ctrl_vsi);
	ice_repr_rem_from_all_vfs(pf);
}

/**
 * ice_eswitch_mode_set - set new eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: eswitch mode to switch to
 * @extack: pointer to extack structure
 */
int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
		     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (pf->eswitch_mode == mode)
		return 0;

	if (pf->num_alloc_vfs) {
		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there are no VFs created");
		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there are no VFs created");
		return -EOPNOTSUPP;
	}

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
		return -EINVAL;
	}

	pf->eswitch_mode = mode;
	return 0;
}

/**
 * ice_eswitch_mode_get - get current eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: output parameter for current eswitch mode
 */
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct ice_pf *pf = devlink_priv(devlink);

	*mode = pf->eswitch_mode;
	return 0;
}

/**
 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
 * false otherwise.
 */
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
{
	return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

/**
 * ice_eswitch_release - cleanup eswitch
 * @pf: pointer to PF structure
 */
void ice_eswitch_release(struct ice_pf *pf)
{
	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return;

	ice_eswitch_disable_switchdev(pf);
	pf->switchdev.is_running = false;
}

/**
 * ice_eswitch_configure - configure eswitch
 * @pf: pointer to PF structure
 */
int ice_eswitch_configure(struct ice_pf *pf)
{
	int status;

	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
		return 0;

	status = ice_eswitch_enable_switchdev(pf);
	if (status)
		return status;

	pf->switchdev.is_running = true;
	return 0;
}

/**
 * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
	struct ice_repr *repr;
	int i;

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, i) {
		repr = pf->vf[i].repr;
		if (repr)
			ice_repr_start_tx_queues(repr);
	}
}

/**
 * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
 * @pf: pointer to PF structure
 */
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
	struct ice_repr *repr;
	int i;

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, i) {
		repr = pf->vf[i].repr;
		if (repr)
			ice_repr_stop_tx_queues(repr);
	}
}

/**
 * ice_eswitch_rebuild - rebuild eswitch
 * @pf: pointer to PF structure
 */
int ice_eswitch_rebuild(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int status;

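	/* Representor NAPI contexts are re-added in ice_eswitch_setup_reprs(),
	 * so disable and remove the stale ones first.
	 */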
	ice_eswitch_napi_disable(pf);
	ice_eswitch_napi_del(pf);

	status = ice_eswitch_setup_env(pf);
	if (status)
		return status;

	status = ice_eswitch_setup_reprs(pf);
	if (status)
		return status;

	ice_eswitch_remap_rings_to_vectors(pf);

	ice_replay_tc_fltrs(pf);

	status = ice_vsi_open(ctrl_vsi);
	if (status)
		return status;

	ice_eswitch_napi_enable(pf);
	ice_eswitch_start_all_tx_queues(pf);

	return 0;
}