// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_eswitch.h"
#include "ice_eswitch_br.h"
#include "ice_fltr.h"
#include "ice_repr.h"
#include "ice_devlink.h"
#include "ice_tc_lib.h"

/**
 * ice_eswitch_add_vf_sp_rule - add adv rule with VF's VSI index
 * @pf: pointer to PF struct
 * @vf: pointer to VF struct
 *
 * This function adds an advanced rule that forwards packets with the
 * VF's VSI index to the corresponding switchdev ctrl VSI queue.
 */
static int
ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_adv_lkup_elem *list;
	struct ice_hw *hw = &pf->hw;
	const u16 lkups_cnt = 1;
	int err;

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	ice_rule_add_src_vsi_metadata(list);

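	/* Match on the VF's source VSI in the Tx direction and redirect the
	 * packet to the control VSI Rx queue dedicated to this VF. The
	 * loopback action is needed so that traffic egressing the VF's VSI
	 * can be received back on the control VSI (i.e. by the port
	 * representor).
	 */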
	rule_info.sw_act.flag = ICE_FLTR_TX;
	rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
	rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
	rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
				       ctrl_vsi->rxq_map[vf->vf_id];
	rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
	rule_info.flags_info.act_valid = true;
	rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
	rule_info.src_vsi = vf->lan_vsi_idx;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
			       &vf->repr->sp_rule);
	if (err)
		dev_err(ice_pf_to_dev(pf), "Unable to add VF slow-path rule in switchdev mode for VF %d",
			vf->vf_id);

	kfree(list);
	return err;
}

/**
 * ice_eswitch_del_vf_sp_rule - delete adv rule with VF's VSI index
 * @vf: pointer to the VF struct
 *
 * Delete the advanced rule that was used to forward packets with the VF's VSI
 * index to the corresponding switchdev ctrl VSI queue.
 */
static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
{
	if (!vf->repr)
		return;

	ice_rem_adv_rule_by_id(&vf->pf->hw, &vf->repr->sp_rule);
}

/**
 * ice_eswitch_setup_env - configure switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function adds the HW filter configuration specific to switchdev
 * mode.
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct net_device *uplink_netdev = uplink_vsi->netdev;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_vsi_vlan_ops *vlan_ops;
	bool rule_added = false;

	vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi);
	if (vlan_ops->dis_stripping(ctrl_vsi))
		return -ENODEV;

	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

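	/* Drop the MAC and multicast addresses previously synced from the
	 * stack; the uplink VSI is made the default VSI below and will
	 * receive all otherwise unmatched traffic.
	 */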
	netif_addr_lock_bh(uplink_netdev);
	__dev_uc_unsync(uplink_netdev, NULL);
	__dev_mc_unsync(uplink_netdev, NULL);
	netif_addr_unlock_bh(uplink_netdev);

	if (ice_vsi_add_vlan_zero(uplink_vsi))
		goto err_def_rx;

	if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
		if (ice_set_dflt_vsi(uplink_vsi))
			goto err_def_rx;
		rule_added = true;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
	if (vlan_ops->dis_rx_filtering(uplink_vsi))
		goto err_dis_rx;

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

	if (ice_vsi_update_local_lb(uplink_vsi, true))
		goto err_override_local_lb;

	return 0;

err_override_local_lb:
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	vlan_ops->ena_rx_filtering(uplink_vsi);
err_dis_rx:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi);
err_def_rx:
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	return -ENODEV;
}

/**
 * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
 * @pf: pointer to PF struct
 *
 * In switchdev mode, the number of allocated Tx and Rx rings is equal.
 *
 * This function fills the q_vector structures associated with each port
 * representor and assigns one ring pair to each port representor netdev.
 * Each port representor gets a dedicated Tx/Rx ring pair, so the number of
 * ring pairs is equal to the number of VFs.
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->switchdev.control_vsi;
	int q_id;

	ice_for_each_txq(vsi, q_id) {
		struct ice_q_vector *q_vector;
		struct ice_tx_ring *tx_ring;
		struct ice_rx_ring *rx_ring;
		struct ice_repr *repr;
		struct ice_vf *vf;

		vf = ice_get_vf_by_id(pf, q_id);
		if (WARN_ON(!vf))
			continue;

		repr = vf->repr;
		q_vector = repr->q_vector;
		tx_ring = vsi->tx_rings[q_id];
		rx_ring = vsi->rx_rings[q_id];

		q_vector->vsi = vsi;
		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
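		/* every representor q_vector shares the register index of
		 * the control VSI's first interrupt vector
		 */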

		q_vector->num_ring_tx = 1;
		q_vector->tx.tx_ring = tx_ring;
		tx_ring->q_vector = q_vector;
		tx_ring->next = NULL;
		tx_ring->netdev = repr->netdev;
		/* In switchdev mode, from the OS stack's perspective, there is
		 * only one queue for a given netdev, so it needs to be indexed
		 * as 0.
		 */
		tx_ring->q_index = 0;

		q_vector->num_ring_rx = 1;
		q_vector->rx.rx_ring = rx_ring;
		rx_ring->q_vector = q_vector;
		rx_ring->next = NULL;
		rx_ring->netdev = repr->netdev;

		ice_put_vf(vf);
	}
}

/**
 * ice_eswitch_release_reprs - clear port representor VSIs configuration
 * @pf: pointer to PF struct
 * @ctrl_vsi: pointer to switchdev control VSI
 */
static void
ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

		/* Skip VFs that aren't configured */
		if (!vf->repr->dst)
			continue;

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		vf->repr->dst = NULL;
		ice_eswitch_del_vf_sp_rule(vf);
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
					       ICE_FWD_TO_VSI);

		netif_napi_del(&vf->repr->q_vector->napi);
	}
}

/**
 * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
 * @pf: pointer to PF struct
 */
static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int max_vsi_num = 0;
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
						   GFP_KERNEL);
		if (!vf->repr->dst) {
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_eswitch_add_vf_sp_rule(pf, vf)) {
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			ice_eswitch_del_vf_sp_rule(vf);
			metadata_dst_free(vf->repr->dst);
			vf->repr->dst = NULL;
			goto err;
		}

		if (ice_vsi_add_vlan_zero(vsi)) {
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			ice_eswitch_del_vf_sp_rule(vf);
			metadata_dst_free(vf->repr->dst);
			vf->repr->dst = NULL;
			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
			goto err;
		}

		if (max_vsi_num < vsi->vsi_num)
			max_vsi_num = vsi->vsi_num;

		netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi,
			       ice_napi_poll);

		netif_keep_dst(vf->repr->netdev);
	}

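	/* Second pass: with every representor configured, point each
	 * metadata dst at the VF's VSI number and switch the representor
	 * netdevs over to transmitting through the control VSI.
	 */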
	ice_for_each_vf(pf, bkt, vf) {
		struct ice_repr *repr = vf->repr;
		struct ice_vsi *vsi = repr->src_vsi;
		struct metadata_dst *dst;

		dst = repr->dst;
		dst->u.port_info.port_id = vsi->vsi_num;
		dst->u.port_info.lower_dev = repr->netdev;
		ice_repr_set_traffic_vsi(repr, ctrl_vsi);
	}

	return 0;

err:
	ice_eswitch_release_reprs(pf, ctrl_vsi);

	return -ENODEV;
}

/**
 * ice_eswitch_update_repr - reconfigure VF port representor
 * @vsi: VF VSI for which port representor is configured
 */
void ice_eswitch_update_repr(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_repr *repr;
	struct ice_vf *vf;
	int ret;

	if (!ice_is_switchdev_running(pf))
		return;

	vf = vsi->vf;
	repr = vf->repr;
	repr->src_vsi = vsi;
	repr->dst->u.port_info.port_id = vsi->vsi_num;

	if (repr->br_port)
		repr->br_port->vsi = vsi;

	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
	if (ret) {
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
			vsi->vf->vf_id);
	}
}

/**
 * ice_eswitch_port_start_xmit - callback for packet transmit
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	struct ice_vsi *vsi;

	np = netdev_priv(netdev);
	vsi = np->vsi;

	if (!vsi || !ice_is_switchdev_running(vsi->back))
		return NETDEV_TX_BUSY;

	if (ice_is_reset_in_progress(vsi->back->state) ||
	    test_bit(ICE_VF_DIS, vsi->back->state))
		return NETDEV_TX_BUSY;

	repr = ice_netdev_to_repr(netdev);
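	/* Attach the representor's metadata dst so ice_eswitch_set_target_vsi()
	 * can program the VF's VSI into the Tx context descriptor;
	 * queue_mapping selects the control VSI Tx ring dedicated to this VF.
	 */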
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->queue_mapping = repr->vf->vf_id;

	return ice_start_xmit(skb, netdev);
}

/**
 * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
 * @skb: pointer to send buffer
 * @off: pointer to offload struct
 */
void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
			   struct ice_tx_offload_params *off)
{
	struct metadata_dst *dst = skb_metadata_dst(skb);
	u64 cd_cmd, dst_vsi;

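	/* Without a metadata dst the packet is switched to the uplink;
	 * otherwise the dst carries the target VSI number set in the port
	 * representor xmit path and HW switches the packet to that VSI.
	 */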
	if (!dst) {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
		off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
	} else {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
		dst_vsi = ((u64)dst->u.port_info.port_id <<
			   ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
		off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
	}
}

/**
 * ice_eswitch_release_env - clear switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function removes the HW filter configuration specific to switchdev
 * mode and restores the default legacy mode settings.
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_vsi_vlan_ops *vlan_ops;

	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);

	ice_vsi_update_local_lb(uplink_vsi, false);
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	vlan_ops->ena_rx_filtering(uplink_vsi);
	ice_clear_dflt_vsi(uplink_vsi);
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}

/**
 * ice_eswitch_vsi_setup - configure switchdev control VSI
 * @pf: pointer to PF structure
 * @pi: pointer to port_info structure
 */
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_SWITCHDEV_CTRL;
	params.pi = pi;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
}

/**
 * ice_eswitch_napi_del - remove NAPI handle for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_del(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		netif_napi_del(&vf->repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_enable - enable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		napi_enable(&vf->repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_disable - disable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		napi_disable(&vf->repr->q_vector->napi);
}

/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi, *uplink_vsi;

	uplink_vsi = ice_get_main_vsi(pf);
	if (!uplink_vsi)
		return -ENODEV;

	if (netif_is_any_bridge_port(uplink_vsi->netdev)) {
		dev_err(ice_pf_to_dev(pf),
			"Uplink port cannot be a bridge port\n");
		return -EINVAL;
	}

	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->switchdev.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->switchdev.control_vsi;
	pf->switchdev.uplink_vsi = uplink_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_repr_add_for_all_vfs(pf))
		goto err_repr_add;

	if (ice_eswitch_setup_reprs(pf))
		goto err_setup_reprs;

	ice_eswitch_remap_rings_to_vectors(pf);

	if (ice_vsi_open(ctrl_vsi))
		goto err_setup_reprs;

	if (ice_eswitch_br_offloads_init(pf))
		goto err_br_offloads;

	ice_eswitch_napi_enable(pf);

	return 0;

err_br_offloads:
	ice_vsi_close(ctrl_vsi);
err_setup_reprs:
	ice_repr_rem_from_all_vfs(pf);
err_repr_add:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}

/**
 * ice_eswitch_disable_switchdev - disable switchdev resources
 * @pf: pointer to PF structure
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_br_offloads_deinit(pf);
	ice_eswitch_release_env(pf);
	ice_eswitch_release_reprs(pf, ctrl_vsi);
	ice_vsi_release(ctrl_vsi);
	ice_repr_rem_from_all_vfs(pf);
}

/**
 * ice_eswitch_mode_set - set new eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: eswitch mode to switch to
 * @extack: pointer to extack structure
 */
int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
		     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (pf->eswitch_mode == mode)
		return 0;

	if (ice_has_vfs(pf)) {
		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there are no VFs created");
		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there are no VFs created");
		return -EOPNOTSUPP;
	}

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
		return -EINVAL;
	}

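	/* Only the requested mode is recorded here; switchdev resources are
	 * allocated later, when VFs are created (see ice_eswitch_configure()).
	 */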
	pf->eswitch_mode = mode;
	return 0;
}

/**
 * ice_eswitch_mode_get - get current eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: output parameter for current eswitch mode
 */
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct ice_pf *pf = devlink_priv(devlink);

	*mode = pf->eswitch_mode;
	return 0;
}

/**
 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
 * false otherwise.
 */
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
{
	return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

/**
 * ice_eswitch_release - cleanup eswitch
 * @pf: pointer to PF structure
 */
void ice_eswitch_release(struct ice_pf *pf)
{
	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return;

	ice_eswitch_disable_switchdev(pf);
	pf->switchdev.is_running = false;
}

/**
 * ice_eswitch_configure - configure eswitch
 * @pf: pointer to PF structure
 */
int ice_eswitch_configure(struct ice_pf *pf)
{
	int status;

	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
		return 0;

	status = ice_eswitch_enable_switchdev(pf);
	if (status)
		return status;

	pf->switchdev.is_running = true;
	return 0;
}

/**
 * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, bkt, vf) {
		if (vf->repr)
			ice_repr_start_tx_queues(vf->repr);
	}
}

/**
 * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
 * @pf: pointer to PF structure
 */
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, bkt, vf) {
		if (vf->repr)
			ice_repr_stop_tx_queues(vf->repr);
	}
}

/**
 * ice_eswitch_rebuild - rebuild eswitch
 * @pf: pointer to PF structure
 */
int ice_eswitch_rebuild(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int status;

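	/* ice_eswitch_setup_reprs() registers NAPI again for every
	 * representor, so disable and delete the existing NAPI instances
	 * first.
	 */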
	ice_eswitch_napi_disable(pf);
	ice_eswitch_napi_del(pf);

	status = ice_eswitch_setup_env(pf);
	if (status)
		return status;

	status = ice_eswitch_setup_reprs(pf);
	if (status)
		return status;

	ice_eswitch_remap_rings_to_vectors(pf);

	ice_replay_tc_fltrs(pf);

	status = ice_vsi_open(ctrl_vsi);
	if (status)
		return status;

	ice_eswitch_napi_enable(pf);
	ice_eswitch_start_all_tx_queues(pf);

	return 0;
}