// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_eswitch.h"
#include "ice_eswitch_br.h"
#include "ice_fltr.h"
#include "ice_repr.h"
#include "ice_devlink.h"
#include "ice_tc_lib.h"

/**
 * ice_eswitch_add_vf_sp_rule - add adv rule with VF's VSI index
 * @pf: pointer to PF struct
 * @vf: pointer to VF struct
 *
 * This function adds an advanced rule that forwards packets with the VF's
 * VSI index to the corresponding switchdev ctrl VSI queue.
 */
static int
ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_adv_lkup_elem *list;
	struct ice_hw *hw = &pf->hw;
	const u16 lkups_cnt = 1;
	int err;

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	ice_rule_add_src_vsi_metadata(list);

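	/* Slow-path rule: match Tx traffic coming from the VF's VSI and
	 * forward it to the control VSI Rx queue mapped to this VF, with
	 * loopback enabled so the Tx frame can be received on the control
	 * VSI.
	 */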
	rule_info.sw_act.flag = ICE_FLTR_TX;
	rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
	rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
	rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
				       ctrl_vsi->rxq_map[vf->vf_id];
	rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
	rule_info.flags_info.act_valid = true;
	rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
	rule_info.src_vsi = vf->lan_vsi_idx;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
			       &vf->repr->sp_rule);
	if (err)
		dev_err(ice_pf_to_dev(pf), "Unable to add VF slow-path rule in switchdev mode for VF %d",
			vf->vf_id);

	kfree(list);
	return err;
}

/**
 * ice_eswitch_del_vf_sp_rule - delete adv rule with VF's VSI index
 * @vf: pointer to the VF struct
 *
 * Delete the advanced rule that was used to forward packets with the VF's VSI
 * index to the corresponding switchdev ctrl VSI queue.
 */
static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
{
	if (!vf->repr)
		return;

	ice_rem_adv_rule_by_id(&vf->pf->hw, &vf->repr->sp_rule);
}

/**
 * ice_eswitch_setup_env - configure switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function adds the HW filter configuration specific to switchdev
 * mode.
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct net_device *uplink_netdev = uplink_vsi->netdev;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_vsi_vlan_ops *vlan_ops;
	bool rule_added = false;

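	/* Clear the uplink's existing MAC filters and stop address syncing;
	 * in switchdev mode the uplink is made the default VSI below, so it
	 * receives traffic that does not match any other rule.
	 */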
	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

	netif_addr_lock_bh(uplink_netdev);
	__dev_uc_unsync(uplink_netdev, NULL);
	__dev_mc_unsync(uplink_netdev, NULL);
	netif_addr_unlock_bh(uplink_netdev);

	if (ice_vsi_add_vlan_zero(uplink_vsi))
		goto err_def_rx;

	if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
		if (ice_set_dflt_vsi(uplink_vsi))
			goto err_def_rx;
		rule_added = true;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
	if (vlan_ops->dis_rx_filtering(uplink_vsi))
		goto err_dis_rx;

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

	if (ice_vsi_update_local_lb(uplink_vsi, true))
		goto err_override_local_lb;

	return 0;

err_override_local_lb:
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	vlan_ops->ena_rx_filtering(uplink_vsi);
err_dis_rx:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi);
err_def_rx:
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	return -ENODEV;
}

/**
 * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
 * @pf: pointer to PF struct
 *
 * In switchdev mode, the number of allocated Tx and Rx rings is equal.
 *
 * This function fills the q_vector structures associated with each
 * representor and moves each ring pair to its port representor netdev. Each
 * port representor gets one dedicated Tx/Rx ring pair, so the number of ring
 * pairs is equal to the number of VFs.
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->switchdev.control_vsi;
	int q_id;

	ice_for_each_txq(vsi, q_id) {
		struct ice_q_vector *q_vector;
		struct ice_tx_ring *tx_ring;
		struct ice_rx_ring *rx_ring;
		struct ice_repr *repr;
		struct ice_vf *vf;

		vf = ice_get_vf_by_id(pf, q_id);
		if (WARN_ON(!vf))
			continue;

		repr = vf->repr;
		q_vector = repr->q_vector;
		tx_ring = vsi->tx_rings[q_id];
		rx_ring = vsi->rx_rings[q_id];

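		/* Attach the ring pair to the representor's q_vector. The
		 * vector reuses the control VSI's first interrupt register
		 * index, so representor traffic is serviced through the
		 * control VSI's interrupt.
		 */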
		q_vector->vsi = vsi;
		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;

		q_vector->num_ring_tx = 1;
		q_vector->tx.tx_ring = tx_ring;
		tx_ring->q_vector = q_vector;
		tx_ring->next = NULL;
		tx_ring->netdev = repr->netdev;
		/* In switchdev mode, from the OS stack's perspective, there is
		 * only one queue for a given netdev, so it needs to be indexed
		 * as 0.
		 */
		tx_ring->q_index = 0;

		q_vector->num_ring_rx = 1;
		q_vector->rx.rx_ring = rx_ring;
		rx_ring->q_vector = q_vector;
		rx_ring->next = NULL;
		rx_ring->netdev = repr->netdev;

		ice_put_vf(vf);
	}
}

/**
 * ice_eswitch_release_reprs - clear PR VSIs configuration
 * @pf: pointer to PF struct
 * @ctrl_vsi: pointer to switchdev control VSI
 */
static void
ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

		/* Skip VFs that aren't configured */
		if (!vf->repr->dst)
			continue;

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		vf->repr->dst = NULL;
		ice_eswitch_del_vf_sp_rule(vf);
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
					       ICE_FWD_TO_VSI);

		netif_napi_del(&vf->repr->q_vector->napi);
	}
}

/**
 * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
 * @pf: pointer to PF struct
 */
static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int max_vsi_num = 0;
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

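	/* Per-VF setup: drop the VF's legacy MAC filters, allocate the
	 * metadata dst used to mux traffic to the right port, install the
	 * slow-path rule, disable antispoof and add VLAN 0. On any failure
	 * the VF's default MAC/broadcast filter is restored and the steps
	 * already taken for this VF are rolled back.
	 */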
	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
						   GFP_KERNEL);
		if (!vf->repr->dst) {
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_eswitch_add_vf_sp_rule(pf, vf)) {
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			ice_eswitch_del_vf_sp_rule(vf);
			metadata_dst_free(vf->repr->dst);
			vf->repr->dst = NULL;
			goto err;
		}

		if (ice_vsi_add_vlan_zero(vsi)) {
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			ice_eswitch_del_vf_sp_rule(vf);
			metadata_dst_free(vf->repr->dst);
			vf->repr->dst = NULL;
			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
			goto err;
		}

		if (max_vsi_num < vsi->vsi_num)
			max_vsi_num = vsi->vsi_num;

		netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi,
			       ice_napi_poll);

		netif_keep_dst(vf->repr->netdev);
	}

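	/* Second pass: publish the VSI number and representor netdev in each
	 * metadata dst so the Tx path can resolve the target VSI, and point
	 * the representors' traffic at the switchdev control VSI.
	 */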
	ice_for_each_vf(pf, bkt, vf) {
		struct ice_repr *repr = vf->repr;
		struct ice_vsi *vsi = repr->src_vsi;
		struct metadata_dst *dst;

		dst = repr->dst;
		dst->u.port_info.port_id = vsi->vsi_num;
		dst->u.port_info.lower_dev = repr->netdev;
		ice_repr_set_traffic_vsi(repr, ctrl_vsi);
	}

	return 0;

err:
	ice_eswitch_release_reprs(pf, ctrl_vsi);

	return -ENODEV;
}

/**
 * ice_eswitch_update_repr - reconfigure VF port representor
 * @vsi: VF VSI for which the port representor is configured
 */
void ice_eswitch_update_repr(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_repr *repr;
	struct ice_vf *vf;
	int ret;

	if (!ice_is_switchdev_running(pf))
		return;

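	/* The VF's VSI may have been rebuilt with a new VSI number; refresh
	 * the pointers cached in the representor and bridge port so the Tx
	 * path and bridge offloads keep targeting the current VSI.
	 */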
	vf = vsi->vf;
	repr = vf->repr;
	repr->src_vsi = vsi;
	repr->dst->u.port_info.port_id = vsi->vsi_num;

	if (repr->br_port)
		repr->br_port->vsi = vsi;

	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
	if (ret) {
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
			vsi->vf->vf_id);
	}
}

/**
 * ice_eswitch_port_start_xmit - callback for packet transmit
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	struct ice_vsi *vsi;

	np = netdev_priv(netdev);
	vsi = np->vsi;

	if (!vsi || !ice_is_switchdev_running(vsi->back))
		return NETDEV_TX_BUSY;

	if (ice_is_reset_in_progress(vsi->back->state) ||
	    test_bit(ICE_VF_DIS, vsi->back->state))
		return NETDEV_TX_BUSY;

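	/* Stamp the skb with the representor's metadata dst so that
	 * ice_eswitch_set_target_vsi() can steer it to the VF's VSI, and
	 * use queue_mapping to select the control VSI ring dedicated to
	 * this VF.
	 */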
	repr = ice_netdev_to_repr(netdev);
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->queue_mapping = repr->vf->vf_id;

	return ice_start_xmit(skb, netdev);
}

/**
 * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
 * @skb: pointer to send buffer
 * @off: pointer to offload struct
 */
void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
			   struct ice_tx_offload_params *off)
{
	struct metadata_dst *dst = skb_metadata_dst(skb);
	u64 cd_cmd, dst_vsi;

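	/* Frames without a metadata dst are sent out the uplink port; frames
	 * coming from a port representor carry a metadata dst and are
	 * directed to the VSI number stored in it.
	 */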
	if (!dst) {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
		off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
	} else {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
		dst_vsi = ((u64)dst->u.port_info.port_id <<
			   ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
		off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
	}
}

/**
 * ice_eswitch_release_env - clear switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function removes the HW filter configuration specific to switchdev
 * mode and restores the default legacy mode settings.
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_vsi_vlan_ops *vlan_ops;

	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);

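	/* Undo ice_eswitch_setup_env() in reverse order and restore the
	 * uplink's own MAC/broadcast filter.
	 */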
	ice_vsi_update_local_lb(uplink_vsi, false);
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	vlan_ops->ena_rx_filtering(uplink_vsi);
	ice_clear_dflt_vsi(uplink_vsi);
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}

/**
 * ice_eswitch_vsi_setup - configure switchdev control VSI
 * @pf: pointer to PF structure
 * @pi: pointer to port_info structure
 */
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_SWITCHDEV_CTRL;
	params.pi = pi;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
}

/**
 * ice_eswitch_napi_del - remove NAPI handle for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_del(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		netif_napi_del(&vf->repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_enable - enable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		napi_enable(&vf->repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_disable - disable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		napi_disable(&vf->repr->q_vector->napi);
}

/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi, *uplink_vsi;

	uplink_vsi = ice_get_main_vsi(pf);
	if (!uplink_vsi)
		return -ENODEV;

	if (netif_is_any_bridge_port(uplink_vsi->netdev)) {
		dev_err(ice_pf_to_dev(pf),
			"Uplink port cannot be a bridge port\n");
		return -EINVAL;
	}

	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->switchdev.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->switchdev.control_vsi;
	pf->switchdev.uplink_vsi = uplink_vsi;

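	/* Bring-up order: HW filter environment, port representors, ring to
	 * vector remapping, control VSI open, then bridge offloads. The
	 * error labels below unwind these steps in reverse.
	 */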
	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_repr_add_for_all_vfs(pf))
		goto err_repr_add;

	if (ice_eswitch_setup_reprs(pf))
		goto err_setup_reprs;

	ice_eswitch_remap_rings_to_vectors(pf);

	if (ice_vsi_open(ctrl_vsi))
		goto err_setup_reprs;

	if (ice_eswitch_br_offloads_init(pf))
		goto err_br_offloads;

	ice_eswitch_napi_enable(pf);

	return 0;

err_br_offloads:
	ice_vsi_close(ctrl_vsi);
err_setup_reprs:
	ice_repr_rem_from_all_vfs(pf);
err_repr_add:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}

/**
 * ice_eswitch_disable_switchdev - disable switchdev resources
 * @pf: pointer to PF structure
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

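	/* Tear down everything set up by ice_eswitch_enable_switchdev(),
	 * leaving the uplink back in legacy (non-switchdev) operation.
	 */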
	ice_eswitch_napi_disable(pf);
	ice_eswitch_br_offloads_deinit(pf);
	ice_eswitch_release_env(pf);
	ice_eswitch_release_reprs(pf, ctrl_vsi);
	ice_vsi_release(ctrl_vsi);
	ice_repr_rem_from_all_vfs(pf);
}

/**
 * ice_eswitch_mode_set - set new eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: eswitch mode to switch to
 * @extack: pointer to extack structure
 */
int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
		     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (pf->eswitch_mode == mode)
		return 0;

	if (ice_has_vfs(pf)) {
		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there are no VFs created");
		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there are no VFs created");
		return -EOPNOTSUPP;
	}

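	/* Only the requested mode is recorded here; the switchdev
	 * environment itself is brought up later by ice_eswitch_configure().
	 */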
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
	{
		if (ice_is_adq_active(pf)) {
			dev_err(ice_pf_to_dev(pf), "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
			NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
			return -EOPNOTSUPP;
		}

		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
		break;
	}
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
		return -EINVAL;
	}

	pf->eswitch_mode = mode;
	return 0;
}

/**
 * ice_eswitch_mode_get - get current eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: output parameter for current eswitch mode
 */
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct ice_pf *pf = devlink_priv(devlink);

	*mode = pf->eswitch_mode;
	return 0;
}

/**
 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
 * false otherwise.
 */
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
{
	return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

/**
 * ice_eswitch_release - cleanup eswitch
 * @pf: pointer to PF structure
 */
void ice_eswitch_release(struct ice_pf *pf)
{
	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return;

	ice_eswitch_disable_switchdev(pf);
	pf->switchdev.is_running = false;
}

/**
 * ice_eswitch_configure - configure eswitch
 * @pf: pointer to PF structure
 */
int ice_eswitch_configure(struct ice_pf *pf)
{
	int status;

	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
		return 0;

	status = ice_eswitch_enable_switchdev(pf);
	if (status)
		return status;

	pf->switchdev.is_running = true;
	return 0;
}

/**
 * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, bkt, vf) {
		if (vf->repr)
			ice_repr_start_tx_queues(vf->repr);
	}
}

669 
670 /**
671  * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
672  * @pf: pointer to PF structure
673  */
674 void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
675 {
676 	struct ice_vf *vf;
677 	unsigned int bkt;
678 
679 	lockdep_assert_held(&pf->vfs.table_lock);
680 
681 	if (test_bit(ICE_DOWN, pf->state))
682 		return;
683 
684 	ice_for_each_vf(pf, bkt, vf) {
685 		if (vf->repr)
686 			ice_repr_stop_tx_queues(vf->repr);
687 	}
688 }
689 
690 /**
691  * ice_eswitch_rebuild - rebuild eswitch
692  * @pf: pointer to PF structure
693  */
694 int ice_eswitch_rebuild(struct ice_pf *pf)
695 {
696 	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
697 	int status;
698 
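	/* Re-create the switchdev environment from scratch (environment,
	 * representors, ring remapping, TC filter replay) after the PF has
	 * been rebuilt, e.g. following a reset.
	 */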
	ice_eswitch_napi_disable(pf);
	ice_eswitch_napi_del(pf);

	status = ice_eswitch_setup_env(pf);
	if (status)
		return status;

	status = ice_eswitch_setup_reprs(pf);
	if (status)
		return status;

	ice_eswitch_remap_rings_to_vectors(pf);

	ice_replay_tc_fltrs(pf);

	status = ice_vsi_open(ctrl_vsi);
	if (status)
		return status;

	ice_eswitch_napi_enable(pf);
	ice_eswitch_start_all_tx_queues(pf);

	return 0;
}