1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (C) 2019-2021, Intel Corporation. */ 3 4 #include "ice.h" 5 #include "ice_lib.h" 6 #include "ice_eswitch.h" 7 #include "ice_fltr.h" 8 #include "ice_repr.h" 9 #include "ice_devlink.h" 10 #include "ice_tc_lib.h" 11 12 /** 13 * ice_eswitch_add_vf_sp_rule - add adv rule with VF's VSI index 14 * @pf: pointer to PF struct 15 * @vf: pointer to VF struct 16 * 17 * This function adds advanced rule that forwards packets with 18 * VF's VSI index to the corresponding switchdev ctrl VSI queue. 19 */ 20 static int 21 ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf) 22 { 23 struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; 24 struct ice_adv_rule_info rule_info = { 0 }; 25 struct ice_adv_lkup_elem *list; 26 struct ice_hw *hw = &pf->hw; 27 const u16 lkups_cnt = 1; 28 int err; 29 30 list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); 31 if (!list) 32 return -ENOMEM; 33 34 ice_rule_add_src_vsi_metadata(list); 35 36 rule_info.sw_act.flag = ICE_FLTR_TX; 37 rule_info.sw_act.vsi_handle = ctrl_vsi->idx; 38 rule_info.sw_act.fltr_act = ICE_FWD_TO_Q; 39 rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id + 40 ctrl_vsi->rxq_map[vf->vf_id]; 41 rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE; 42 rule_info.flags_info.act_valid = true; 43 rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN; 44 rule_info.src_vsi = vf->lan_vsi_idx; 45 46 err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, 47 &vf->repr->sp_rule); 48 if (err) 49 dev_err(ice_pf_to_dev(pf), "Unable to add VF slow-path rule in switchdev mode for VF %d", 50 vf->vf_id); 51 52 kfree(list); 53 return err; 54 } 55 56 /** 57 * ice_eswitch_del_vf_sp_rule - delete adv rule with VF's VSI index 58 * @vf: pointer to the VF struct 59 * 60 * Delete the advanced rule that was used to forward packets with the VF's VSI 61 * index to the corresponding switchdev ctrl VSI queue. 
62 */ 63 static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf) 64 { 65 if (!vf->repr) 66 return; 67 68 ice_rem_adv_rule_by_id(&vf->pf->hw, &vf->repr->sp_rule); 69 } 70 71 /** 72 * ice_eswitch_setup_env - configure switchdev HW filters 73 * @pf: pointer to PF struct 74 * 75 * This function adds HW filters configuration specific for switchdev 76 * mode. 77 */ 78 static int ice_eswitch_setup_env(struct ice_pf *pf) 79 { 80 struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; 81 struct net_device *uplink_netdev = uplink_vsi->netdev; 82 struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; 83 struct ice_vsi_vlan_ops *vlan_ops; 84 bool rule_added = false; 85 86 vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi); 87 if (vlan_ops->dis_stripping(ctrl_vsi)) 88 return -ENODEV; 89 90 ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); 91 92 netif_addr_lock_bh(uplink_netdev); 93 __dev_uc_unsync(uplink_netdev, NULL); 94 __dev_mc_unsync(uplink_netdev, NULL); 95 netif_addr_unlock_bh(uplink_netdev); 96 97 if (ice_vsi_add_vlan_zero(uplink_vsi)) 98 goto err_def_rx; 99 100 if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) { 101 if (ice_set_dflt_vsi(uplink_vsi)) 102 goto err_def_rx; 103 rule_added = true; 104 } 105 106 if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) 107 goto err_override_uplink; 108 109 if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override)) 110 goto err_override_control; 111 112 return 0; 113 114 err_override_control: 115 ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); 116 err_override_uplink: 117 if (rule_added) 118 ice_clear_dflt_vsi(uplink_vsi); 119 err_def_rx: 120 ice_fltr_add_mac_and_broadcast(uplink_vsi, 121 uplink_vsi->port_info->mac.perm_addr, 122 ICE_FWD_TO_VSI); 123 return -ENODEV; 124 } 125 126 /** 127 * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI 128 * @pf: pointer to PF struct 129 * 130 * In switchdev number of allocated Tx/Rx rings is equal. 
 *
 * This function fills q_vectors structures associated with representor and
 * move each ring pairs to port representor netdevs. Each port representor
 * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to
 * number of VFs.
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->switchdev.control_vsi;
	int q_id;

	ice_for_each_txq(vsi, q_id) {
		struct ice_q_vector *q_vector;
		struct ice_tx_ring *tx_ring;
		struct ice_rx_ring *rx_ring;
		struct ice_repr *repr;
		struct ice_vf *vf;

		/* Ring pair q_id is dedicated to the VF with the same id */
		vf = ice_get_vf_by_id(pf, q_id);
		if (WARN_ON(!vf))
			continue;

		repr = vf->repr;
		q_vector = repr->q_vector;
		tx_ring = vsi->tx_rings[q_id];
		rx_ring = vsi->rx_rings[q_id];

		q_vector->vsi = vsi;
		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;

		q_vector->num_ring_tx = 1;
		q_vector->tx.tx_ring = tx_ring;
		tx_ring->q_vector = q_vector;
		tx_ring->next = NULL;
		tx_ring->netdev = repr->netdev;
		/* In switchdev mode, from OS stack perspective, there is only
		 * one queue for given netdev, so it needs to be indexed as 0.
		 */
		tx_ring->q_index = 0;

		q_vector->num_ring_rx = 1;
		q_vector->rx.rx_ring = rx_ring;
		rx_ring->q_vector = q_vector;
		rx_ring->next = NULL;
		rx_ring->netdev = repr->netdev;

		/* drop the reference taken by ice_get_vf_by_id() */
		ice_put_vf(vf);
	}
}

/**
 * ice_eswitch_release_reprs - clear PR VSIs configuration
 * @pf: pointer to PF struct
 * @ctrl_vsi: pointer to switchdev control VSI
 *
 * Undo ice_eswitch_setup_reprs() for every configured VF: restore
 * antispoof, free the metadata dst, remove the slow-path rule, and
 * re-add the legacy MAC/broadcast filters.
 */
static void
ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

		/* Skip VFs that aren't configured */
		if (!vf->repr->dst)
			continue;

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		vf->repr->dst = NULL;
		ice_eswitch_del_vf_sp_rule(vf);
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
					       ICE_FWD_TO_VSI);

		netif_napi_del(&vf->repr->q_vector->napi);
	}
}

/**
 * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
 * @pf: pointer to PF struct
 *
 * For each VF: replace legacy filters with a metadata dst and a slow-path
 * rule, clear antispoof, and register NAPI. On any per-VF failure the
 * steps already taken for that VF are rolled back in-line and all
 * previously configured VFs are released via ice_eswitch_release_reprs().
 *
 * Return: 0 on success, -ENODEV on failure.
 */
static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int max_vsi_num = 0;
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
						   GFP_KERNEL);
		if (!vf->repr->dst) {
			/* restore the legacy filters removed above */
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_eswitch_add_vf_sp_rule(pf, vf)) {
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			ice_eswitch_del_vf_sp_rule(vf);
			metadata_dst_free(vf->repr->dst);
			vf->repr->dst = NULL;
			goto err;
		}

		if (ice_vsi_add_vlan_zero(vsi)) {
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			ice_eswitch_del_vf_sp_rule(vf);
			metadata_dst_free(vf->repr->dst);
			vf->repr->dst = NULL;
			/* antispoof was cleared above — restore it too */
			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
			goto err;
		}

		if (max_vsi_num < vsi->vsi_num)
			max_vsi_num = vsi->vsi_num;

		netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi,
			       ice_napi_poll);

		netif_keep_dst(vf->repr->netdev);
	}

	/* Second pass: all reprs configured, point them at the ctrl VSI */
	ice_for_each_vf(pf, bkt, vf) {
		struct ice_repr *repr = vf->repr;
		struct ice_vsi *vsi = repr->src_vsi;
		struct metadata_dst *dst;

		dst = repr->dst;
		dst->u.port_info.port_id = vsi->vsi_num;
		dst->u.port_info.lower_dev = repr->netdev;
		ice_repr_set_traffic_vsi(repr, ctrl_vsi);
	}

	return 0;

err:
	ice_eswitch_release_reprs(pf, ctrl_vsi);

	return -ENODEV;
}

/**
 * ice_eswitch_update_repr - reconfigure VF port representor
 * @vsi: VF VSI for which port representor is configured
 *
 * Re-attach the representor to a (possibly rebuilt) VF VSI and clear
 * antispoof again. No-op unless switchdev is currently running.
 */
void ice_eswitch_update_repr(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_repr *repr;
	struct ice_vf *vf;
	int ret;

	if (!ice_is_switchdev_running(pf))
		return;

	vf = vsi->vf;
	repr = vf->repr;
	repr->src_vsi = vsi;
	repr->dst->u.port_info.port_id = vsi->vsi_num;

	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
	if (ret) {
		/* fall back to legacy filtering for this VF */
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
			vsi->vf->vf_id);
	}
}

/**
 * ice_eswitch_port_start_xmit - callback for packets transmit
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	struct ice_vsi *vsi;

	np = netdev_priv(netdev);
	vsi = np->vsi;

	if (ice_is_reset_in_progress(vsi->back->state) ||
	    test_bit(ICE_VF_DIS, vsi->back->state))
		return NETDEV_TX_BUSY;

	repr = ice_netdev_to_repr(netdev);
	/* Attach the repr's metadata dst so the Tx path can derive the
	 * target VSI (see ice_eswitch_set_target_vsi()).
	 */
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->queue_mapping = repr->vf->vf_id;

	return ice_start_xmit(skb, netdev);
}

/**
 * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
 * @skb: pointer to send buffer
 * @off: pointer to offload struct
 *
 * Without a metadata dst the packet is sent out the uplink; with one, it
 * is switched to the VSI recorded in the dst's port_id.
 */
void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
			   struct ice_tx_offload_params *off)
{
	struct metadata_dst *dst = skb_metadata_dst(skb);
	u64 cd_cmd, dst_vsi;

	if (!dst) {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
		off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
	} else {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
		dst_vsi = ((u64)dst->u.port_info.port_id <<
			   ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
		off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
	}
}

/**
 * ice_eswitch_release_env - clear switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function removes HW filters configuration specific for switchdev
 * mode and restores default legacy mode settings.
376 */ 377 static void ice_eswitch_release_env(struct ice_pf *pf) 378 { 379 struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; 380 struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; 381 382 ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); 383 ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); 384 ice_clear_dflt_vsi(uplink_vsi); 385 ice_fltr_add_mac_and_broadcast(uplink_vsi, 386 uplink_vsi->port_info->mac.perm_addr, 387 ICE_FWD_TO_VSI); 388 } 389 390 /** 391 * ice_eswitch_vsi_setup - configure switchdev control VSI 392 * @pf: pointer to PF structure 393 * @pi: pointer to port_info structure 394 */ 395 static struct ice_vsi * 396 ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 397 { 398 struct ice_vsi_cfg_params params = {}; 399 400 params.type = ICE_VSI_SWITCHDEV_CTRL; 401 params.pi = pi; 402 params.flags = ICE_VSI_FLAG_INIT; 403 404 return ice_vsi_setup(pf, ¶ms); 405 } 406 407 /** 408 * ice_eswitch_napi_del - remove NAPI handle for all port representors 409 * @pf: pointer to PF structure 410 */ 411 static void ice_eswitch_napi_del(struct ice_pf *pf) 412 { 413 struct ice_vf *vf; 414 unsigned int bkt; 415 416 lockdep_assert_held(&pf->vfs.table_lock); 417 418 ice_for_each_vf(pf, bkt, vf) 419 netif_napi_del(&vf->repr->q_vector->napi); 420 } 421 422 /** 423 * ice_eswitch_napi_enable - enable NAPI for all port representors 424 * @pf: pointer to PF structure 425 */ 426 static void ice_eswitch_napi_enable(struct ice_pf *pf) 427 { 428 struct ice_vf *vf; 429 unsigned int bkt; 430 431 lockdep_assert_held(&pf->vfs.table_lock); 432 433 ice_for_each_vf(pf, bkt, vf) 434 napi_enable(&vf->repr->q_vector->napi); 435 } 436 437 /** 438 * ice_eswitch_napi_disable - disable NAPI for all port representors 439 * @pf: pointer to PF structure 440 */ 441 static void ice_eswitch_napi_disable(struct ice_pf *pf) 442 { 443 struct ice_vf *vf; 444 unsigned int bkt; 445 446 lockdep_assert_held(&pf->vfs.table_lock); 447 448 
ice_for_each_vf(pf, bkt, vf) 449 napi_disable(&vf->repr->q_vector->napi); 450 } 451 452 /** 453 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode 454 * @pf: pointer to PF structure 455 */ 456 static int ice_eswitch_enable_switchdev(struct ice_pf *pf) 457 { 458 struct ice_vsi *ctrl_vsi; 459 460 pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); 461 if (!pf->switchdev.control_vsi) 462 return -ENODEV; 463 464 ctrl_vsi = pf->switchdev.control_vsi; 465 pf->switchdev.uplink_vsi = ice_get_main_vsi(pf); 466 if (!pf->switchdev.uplink_vsi) 467 goto err_vsi; 468 469 if (ice_eswitch_setup_env(pf)) 470 goto err_vsi; 471 472 if (ice_repr_add_for_all_vfs(pf)) 473 goto err_repr_add; 474 475 if (ice_eswitch_setup_reprs(pf)) 476 goto err_setup_reprs; 477 478 ice_eswitch_remap_rings_to_vectors(pf); 479 480 if (ice_vsi_open(ctrl_vsi)) 481 goto err_setup_reprs; 482 483 ice_eswitch_napi_enable(pf); 484 485 return 0; 486 487 err_setup_reprs: 488 ice_repr_rem_from_all_vfs(pf); 489 err_repr_add: 490 ice_eswitch_release_env(pf); 491 err_vsi: 492 ice_vsi_release(ctrl_vsi); 493 return -ENODEV; 494 } 495 496 /** 497 * ice_eswitch_disable_switchdev - disable switchdev resources 498 * @pf: pointer to PF structure 499 */ 500 static void ice_eswitch_disable_switchdev(struct ice_pf *pf) 501 { 502 struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; 503 504 ice_eswitch_napi_disable(pf); 505 ice_eswitch_release_env(pf); 506 ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx); 507 ice_eswitch_release_reprs(pf, ctrl_vsi); 508 ice_vsi_release(ctrl_vsi); 509 ice_repr_rem_from_all_vfs(pf); 510 } 511 512 /** 513 * ice_eswitch_mode_set - set new eswitch mode 514 * @devlink: pointer to devlink structure 515 * @mode: eswitch mode to switch to 516 * @extack: pointer to extack structure 517 */ 518 int 519 ice_eswitch_mode_set(struct devlink *devlink, u16 mode, 520 struct netlink_ext_ack *extack) 521 { 522 struct ice_pf *pf = devlink_priv(devlink); 523 524 if 
(pf->eswitch_mode == mode) 525 return 0; 526 527 if (ice_has_vfs(pf)) { 528 dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created"); 529 NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created"); 530 return -EOPNOTSUPP; 531 } 532 533 switch (mode) { 534 case DEVLINK_ESWITCH_MODE_LEGACY: 535 dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy", 536 pf->hw.pf_id); 537 NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy"); 538 break; 539 case DEVLINK_ESWITCH_MODE_SWITCHDEV: 540 { 541 dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev", 542 pf->hw.pf_id); 543 NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev"); 544 break; 545 } 546 default: 547 NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode"); 548 return -EINVAL; 549 } 550 551 pf->eswitch_mode = mode; 552 return 0; 553 } 554 555 /** 556 * ice_eswitch_mode_get - get current eswitch mode 557 * @devlink: pointer to devlink structure 558 * @mode: output parameter for current eswitch mode 559 */ 560 int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode) 561 { 562 struct ice_pf *pf = devlink_priv(devlink); 563 564 *mode = pf->eswitch_mode; 565 return 0; 566 } 567 568 /** 569 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev 570 * @pf: pointer to PF structure 571 * 572 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV, 573 * false otherwise. 
574 */ 575 bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf) 576 { 577 return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV; 578 } 579 580 /** 581 * ice_eswitch_release - cleanup eswitch 582 * @pf: pointer to PF structure 583 */ 584 void ice_eswitch_release(struct ice_pf *pf) 585 { 586 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) 587 return; 588 589 ice_eswitch_disable_switchdev(pf); 590 pf->switchdev.is_running = false; 591 } 592 593 /** 594 * ice_eswitch_configure - configure eswitch 595 * @pf: pointer to PF structure 596 */ 597 int ice_eswitch_configure(struct ice_pf *pf) 598 { 599 int status; 600 601 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running) 602 return 0; 603 604 status = ice_eswitch_enable_switchdev(pf); 605 if (status) 606 return status; 607 608 pf->switchdev.is_running = true; 609 return 0; 610 } 611 612 /** 613 * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors 614 * @pf: pointer to PF structure 615 */ 616 static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) 617 { 618 struct ice_vf *vf; 619 unsigned int bkt; 620 621 lockdep_assert_held(&pf->vfs.table_lock); 622 623 if (test_bit(ICE_DOWN, pf->state)) 624 return; 625 626 ice_for_each_vf(pf, bkt, vf) { 627 if (vf->repr) 628 ice_repr_start_tx_queues(vf->repr); 629 } 630 } 631 632 /** 633 * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors 634 * @pf: pointer to PF structure 635 */ 636 void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) 637 { 638 struct ice_vf *vf; 639 unsigned int bkt; 640 641 lockdep_assert_held(&pf->vfs.table_lock); 642 643 if (test_bit(ICE_DOWN, pf->state)) 644 return; 645 646 ice_for_each_vf(pf, bkt, vf) { 647 if (vf->repr) 648 ice_repr_stop_tx_queues(vf->repr); 649 } 650 } 651 652 /** 653 * ice_eswitch_rebuild - rebuild eswitch 654 * @pf: pointer to PF structure 655 */ 656 int ice_eswitch_rebuild(struct ice_pf *pf) 657 { 658 struct ice_vsi *ctrl_vsi = 
pf->switchdev.control_vsi; 659 int status; 660 661 ice_eswitch_napi_disable(pf); 662 ice_eswitch_napi_del(pf); 663 664 status = ice_eswitch_setup_env(pf); 665 if (status) 666 return status; 667 668 status = ice_eswitch_setup_reprs(pf); 669 if (status) 670 return status; 671 672 ice_eswitch_remap_rings_to_vectors(pf); 673 674 ice_replay_tc_fltrs(pf); 675 676 status = ice_vsi_open(ctrl_vsi); 677 if (status) 678 return status; 679 680 ice_eswitch_napi_enable(pf); 681 ice_eswitch_start_all_tx_queues(pf); 682 683 return 0; 684 } 685