Lines Matching refs:vsi
77 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
114 struct ice_vsi *vsi = NULL; in ice_check_for_hang_subtask() local
121 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { in ice_check_for_hang_subtask()
122 vsi = pf->vsi[v]; in ice_check_for_hang_subtask()
126 if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state)) in ice_check_for_hang_subtask()
129 if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) in ice_check_for_hang_subtask()
132 hw = &vsi->back->hw; in ice_check_for_hang_subtask()
134 ice_for_each_txq(vsi, i) { in ice_check_for_hang_subtask()
135 struct ice_tx_ring *tx_ring = vsi->tx_rings[i]; in ice_check_for_hang_subtask()
182 struct ice_vsi *vsi; in ice_init_mac_fltr() local
185 vsi = ice_get_main_vsi(pf); in ice_init_mac_fltr()
186 if (!vsi) in ice_init_mac_fltr()
189 perm_addr = vsi->port_info->mac.perm_addr; in ice_init_mac_fltr()
190 return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); in ice_init_mac_fltr()
206 struct ice_vsi *vsi = np->vsi; in ice_add_mac_to_sync_list() local
208 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr, in ice_add_mac_to_sync_list()
228 struct ice_vsi *vsi = np->vsi; in ice_add_mac_to_unsync_list() local
238 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr, in ice_add_mac_to_unsync_list()
251 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) in ice_vsi_fltr_changed() argument
253 return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) || in ice_vsi_fltr_changed()
254 test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_vsi_fltr_changed()
263 static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m) in ice_set_promisc() argument
267 if (vsi->type != ICE_VSI_PF) in ice_set_promisc()
270 if (ice_vsi_has_non_zero_vlans(vsi)) { in ice_set_promisc()
272 status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, in ice_set_promisc()
275 status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_set_promisc()
281 netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n", in ice_set_promisc()
282 vsi->vsi_num, promisc_m); in ice_set_promisc()
292 static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m) in ice_clear_promisc() argument
296 if (vsi->type != ICE_VSI_PF) in ice_clear_promisc()
299 if (ice_vsi_has_non_zero_vlans(vsi)) { in ice_clear_promisc()
301 status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, in ice_clear_promisc()
304 status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_clear_promisc()
308 netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n", in ice_clear_promisc()
309 vsi->vsi_num, promisc_m); in ice_clear_promisc()
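
Note on the pair above: ice_set_promisc() and ice_clear_promisc() are symmetric — both bail out early for non-PF VSIs and pick the VLAN-aware filter path when the VSI carries non-zero VLANs. A minimal user-space sketch of that shape (names, types, and the bitmask handling are illustrative, not the driver's API):

    #include <stdbool.h>
    #include <stdio.h>

    enum vsi_type { VSI_PF, VSI_VF };

    struct vsi {
        enum vsi_type type;
        bool has_nonzero_vlans;
        unsigned char promisc_bits;     /* currently applied filter bits */
    };

    /* Apply promiscuous filter bits; mirrors the set/clear pairing above. */
    static int set_promisc(struct vsi *v, unsigned char mask)
    {
        if (v->type != VSI_PF)
            return 0;               /* only the PF VSI is eligible */
        /* the VLAN-aware and plain paths would diverge here */
        v->promisc_bits |= mask;
        printf("set promisc bits: 0x%x\n", (unsigned)v->promisc_bits);
        return 0;
    }

    static int clear_promisc(struct vsi *v, unsigned char mask)
    {
        if (v->type != VSI_PF)
            return 0;
        v->promisc_bits &= ~mask;
        printf("clear promisc bits: 0x%x\n", (unsigned)v->promisc_bits);
        return 0;
    }

    int main(void)
    {
        struct vsi v = { .type = VSI_PF };

        set_promisc(&v, 0x0f);
        clear_promisc(&v, 0x0f);
        return 0;
    }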
319 static int ice_vsi_sync_fltr(struct ice_vsi *vsi) in ice_vsi_sync_fltr() argument
321 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); in ice_vsi_sync_fltr()
322 struct device *dev = ice_pf_to_dev(vsi->back); in ice_vsi_sync_fltr()
323 struct net_device *netdev = vsi->netdev; in ice_vsi_sync_fltr()
325 struct ice_pf *pf = vsi->back; in ice_vsi_sync_fltr()
330 if (!vsi->netdev) in ice_vsi_sync_fltr()
333 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) in ice_vsi_sync_fltr()
336 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; in ice_vsi_sync_fltr()
337 vsi->current_netdev_flags = vsi->netdev->flags; in ice_vsi_sync_fltr()
339 INIT_LIST_HEAD(&vsi->tmp_sync_list); in ice_vsi_sync_fltr()
340 INIT_LIST_HEAD(&vsi->tmp_unsync_list); in ice_vsi_sync_fltr()
342 if (ice_vsi_fltr_changed(vsi)) { in ice_vsi_sync_fltr()
343 clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
344 clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
357 err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); in ice_vsi_sync_fltr()
358 ice_fltr_free_list(dev, &vsi->tmp_unsync_list); in ice_vsi_sync_fltr()
367 err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); in ice_vsi_sync_fltr()
368 ice_fltr_free_list(dev, &vsi->tmp_sync_list); in ice_vsi_sync_fltr()
381 vsi->state)) { in ice_vsi_sync_fltr()
384 vsi->vsi_num); in ice_vsi_sync_fltr()
392 if (vsi->current_netdev_flags & IFF_ALLMULTI) { in ice_vsi_sync_fltr()
393 err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS); in ice_vsi_sync_fltr()
395 vsi->current_netdev_flags &= ~IFF_ALLMULTI; in ice_vsi_sync_fltr()
400 err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS); in ice_vsi_sync_fltr()
402 vsi->current_netdev_flags |= IFF_ALLMULTI; in ice_vsi_sync_fltr()
409 test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) { in ice_vsi_sync_fltr()
410 clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); in ice_vsi_sync_fltr()
411 if (vsi->current_netdev_flags & IFF_PROMISC) { in ice_vsi_sync_fltr()
413 if (!ice_is_dflt_vsi_in_use(vsi->port_info)) { in ice_vsi_sync_fltr()
414 err = ice_set_dflt_vsi(vsi); in ice_vsi_sync_fltr()
417 err, vsi->vsi_num); in ice_vsi_sync_fltr()
418 vsi->current_netdev_flags &= in ice_vsi_sync_fltr()
423 vlan_ops->dis_rx_filtering(vsi); in ice_vsi_sync_fltr()
430 err = ice_set_promisc(vsi, in ice_vsi_sync_fltr()
437 if (ice_is_vsi_dflt_vsi(vsi)) { in ice_vsi_sync_fltr()
438 err = ice_clear_dflt_vsi(vsi); in ice_vsi_sync_fltr()
441 err, vsi->vsi_num); in ice_vsi_sync_fltr()
442 vsi->current_netdev_flags |= in ice_vsi_sync_fltr()
446 if (vsi->netdev->features & in ice_vsi_sync_fltr()
448 vlan_ops->ena_rx_filtering(vsi); in ice_vsi_sync_fltr()
454 if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) { in ice_vsi_sync_fltr()
455 err = ice_clear_promisc(vsi, in ice_vsi_sync_fltr()
459 err, vsi->vsi_num); in ice_vsi_sync_fltr()
467 set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); in ice_vsi_sync_fltr()
471 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
472 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
474 clear_bit(ICE_CFG_BUSY, vsi->state); in ice_vsi_sync_fltr()
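
Note: ice_vsi_sync_fltr() serializes against other reconfiguration paths by spinning on test_and_set_bit(ICE_CFG_BUSY, vsi->state) and dropping the bit with clear_bit() on exit (lines 333 and 474 above). A standalone sketch of that busy-bit lock using C11 atomics in place of the kernel bitops:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_flag cfg_busy = ATOMIC_FLAG_INIT;

    /* Spin (with a short sleep) until we own the busy bit -- the same
     * shape as the test_and_set_bit(ICE_CFG_BUSY, ...) loop above. */
    static void cfg_lock(void)
    {
        while (atomic_flag_test_and_set(&cfg_busy))
            usleep(100);
    }

    static void cfg_unlock(void)
    {
        atomic_flag_clear(&cfg_busy);   /* clear_bit(ICE_CFG_BUSY, ...) */
    }

    int main(void)
    {
        cfg_lock();
        printf("filter sync critical section\n");
        cfg_unlock();
        return 0;
    }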
492 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && in ice_sync_fltr_subtask()
493 ice_vsi_sync_fltr(pf->vsi[v])) { in ice_sync_fltr_subtask()
511 if (pf->vsi[v]) in ice_pf_dis_all_vsi()
512 ice_dis_vsi(pf->vsi[v], locked); in ice_pf_dis_all_vsi()
532 struct ice_vsi *vsi; in ice_prepare_for_reset() local
563 vsi = ice_get_main_vsi(pf); in ice_prepare_for_reset()
564 if (!vsi) in ice_prepare_for_reset()
570 vsi->orig_rss_size = 0; in ice_prepare_for_reset()
574 vsi->old_ena_tc = vsi->all_enatc; in ice_prepare_for_reset()
575 vsi->old_numtc = vsi->all_numtc; in ice_prepare_for_reset()
577 ice_remove_q_channels(vsi, true); in ice_prepare_for_reset()
582 vsi->old_ena_tc = 0; in ice_prepare_for_reset()
583 vsi->all_enatc = 0; in ice_prepare_for_reset()
584 vsi->old_numtc = 0; in ice_prepare_for_reset()
585 vsi->all_numtc = 0; in ice_prepare_for_reset()
586 vsi->req_txq = 0; in ice_prepare_for_reset()
587 vsi->req_rxq = 0; in ice_prepare_for_reset()
589 memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt)); in ice_prepare_for_reset()
593 if (vsi->netdev) in ice_prepare_for_reset()
594 netif_device_detach(vsi->netdev); in ice_prepare_for_reset()
743 static void ice_print_topo_conflict(struct ice_vsi *vsi) in ice_print_topo_conflict() argument
745 switch (vsi->port_info->phy.link_info.topo_media_conflict) { in ice_print_topo_conflict()
751 …netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not … in ice_print_topo_conflict()
754 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags)) in ice_print_topo_conflict()
755 …netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet … in ice_print_topo_conflict()
757 …netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was d… in ice_print_topo_conflict()
769 void ice_print_link_msg(struct ice_vsi *vsi, bool isup) in ice_print_link_msg() argument
780 if (!vsi) in ice_print_link_msg()
783 if (vsi->current_isup == isup) in ice_print_link_msg()
786 vsi->current_isup = isup; in ice_print_link_msg()
789 netdev_info(vsi->netdev, "NIC Link is Down\n"); in ice_print_link_msg()
793 switch (vsi->port_info->phy.link_info.link_speed) { in ice_print_link_msg()
829 switch (vsi->port_info->fc.current_mode) { in ice_print_link_msg()
848 switch (vsi->port_info->phy.link_info.fec_info) { in ice_print_link_msg()
862 if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) in ice_print_link_msg()
875 status = ice_aq_get_phy_caps(vsi->port_info, false, in ice_print_link_msg()
878 netdev_info(vsi->netdev, "Get phy capability failed.\n"); in ice_print_link_msg()
894 …netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s,… in ice_print_link_msg()
896 ice_print_topo_conflict(vsi); in ice_print_link_msg()
904 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) in ice_vsi_link_event() argument
906 if (!vsi) in ice_vsi_link_event()
909 if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev) in ice_vsi_link_event()
912 if (vsi->type == ICE_VSI_PF) { in ice_vsi_link_event()
913 if (link_up == netif_carrier_ok(vsi->netdev)) in ice_vsi_link_event()
917 netif_carrier_on(vsi->netdev); in ice_vsi_link_event()
918 netif_tx_wake_all_queues(vsi->netdev); in ice_vsi_link_event()
920 netif_carrier_off(vsi->netdev); in ice_vsi_link_event()
921 netif_tx_stop_all_queues(vsi->netdev); in ice_vsi_link_event()
1098 struct ice_vsi *vsi; in ice_link_event() local
1126 vsi = ice_get_main_vsi(pf); in ice_link_event()
1127 if (!vsi || !vsi->port_info) in ice_link_event()
1134 ice_set_link(vsi, false); in ice_link_event()
1150 ice_vsi_link_event(vsi, link_up); in ice_link_event()
1151 ice_print_link_msg(vsi, link_up); in ice_link_event()
1183 if (pf->vsi[i] && pf->vsi[i]->netdev) in ice_watchdog_subtask()
1184 ice_update_vsi_stats(pf->vsi[i]); in ice_watchdog_subtask()
1881 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) in ice_force_phys_link_state() argument
1889 if (!vsi || !vsi->port_info || !vsi->back) in ice_force_phys_link_state()
1891 if (vsi->type != ICE_VSI_PF) in ice_force_phys_link_state()
1894 dev = ice_pf_to_dev(vsi->back); in ice_force_phys_link_state()
1896 pi = vsi->port_info; in ice_force_phys_link_state()
1906 vsi->vsi_num, retcode); in ice_force_phys_link_state()
1932 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); in ice_force_phys_link_state()
1935 vsi->vsi_num, retcode); in ice_force_phys_link_state()
2126 static int ice_configure_phy(struct ice_vsi *vsi) in ice_configure_phy() argument
2128 struct device *dev = ice_pf_to_dev(vsi->back); in ice_configure_phy()
2129 struct ice_port_info *pi = vsi->port_info; in ice_configure_phy()
2133 struct ice_pf *pf = vsi->back; in ice_configure_phy()
2140 ice_print_topo_conflict(vsi); in ice_configure_phy()
2147 return ice_force_phys_link_state(vsi, true); in ice_configure_phy()
2158 vsi->vsi_num, err); in ice_configure_phy()
2179 vsi->vsi_num, err); in ice_configure_phy()
2195 vsi->back->state)) { in ice_configure_phy()
2235 vsi->vsi_num, err); in ice_configure_phy()
2253 struct ice_vsi *vsi; in ice_check_media_subtask() local
2260 vsi = ice_get_main_vsi(pf); in ice_check_media_subtask()
2261 if (!vsi) in ice_check_media_subtask()
2265 pi = vsi->port_info; in ice_check_media_subtask()
2279 if (test_bit(ICE_VSI_DOWN, vsi->state) && in ice_check_media_subtask()
2280 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) in ice_check_media_subtask()
2283 err = ice_configure_phy(vsi); in ice_check_media_subtask()
2474 static int ice_vsi_ena_irq(struct ice_vsi *vsi) in ice_vsi_ena_irq() argument
2476 struct ice_hw *hw = &vsi->back->hw; in ice_vsi_ena_irq()
2479 ice_for_each_q_vector(vsi, i) in ice_vsi_ena_irq()
2480 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); in ice_vsi_ena_irq()
2491 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) in ice_vsi_req_irq_msix() argument
2493 int q_vectors = vsi->num_q_vectors; in ice_vsi_req_irq_msix()
2494 struct ice_pf *pf = vsi->back; in ice_vsi_req_irq_msix()
2503 struct ice_q_vector *q_vector = vsi->q_vectors[vector]; in ice_vsi_req_irq_msix()
2521 if (vsi->type == ICE_VSI_CTRL && vsi->vf) in ice_vsi_req_irq_msix()
2522 err = devm_request_irq(dev, irq_num, vsi->irq_handler, in ice_vsi_req_irq_msix()
2526 err = devm_request_irq(dev, irq_num, vsi->irq_handler, in ice_vsi_req_irq_msix()
2529 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", in ice_vsi_req_irq_msix()
2548 err = ice_set_cpu_rx_rmap(vsi); in ice_vsi_req_irq_msix()
2550 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n", in ice_vsi_req_irq_msix()
2551 vsi->vsi_num, ERR_PTR(err)); in ice_vsi_req_irq_msix()
2555 vsi->irqs_ready = true; in ice_vsi_req_irq_msix()
2560 irq_num = vsi->q_vectors[vector]->irq.virq; in ice_vsi_req_irq_msix()
2564 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); in ice_vsi_req_irq_msix()
2575 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) in ice_xdp_alloc_setup_rings() argument
2577 struct device *dev = ice_pf_to_dev(vsi->back); in ice_xdp_alloc_setup_rings()
2581 ice_for_each_xdp_txq(vsi, i) { in ice_xdp_alloc_setup_rings()
2582 u16 xdp_q_idx = vsi->alloc_txq + i; in ice_xdp_alloc_setup_rings()
2598 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; in ice_xdp_alloc_setup_rings()
2599 xdp_ring->vsi = vsi; in ice_xdp_alloc_setup_rings()
2602 xdp_ring->count = vsi->num_tx_desc; in ice_xdp_alloc_setup_rings()
2603 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); in ice_xdp_alloc_setup_rings()
2618 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) { in ice_xdp_alloc_setup_rings()
2619 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); in ice_xdp_alloc_setup_rings()
2620 vsi->xdp_rings[i]->ring_stats = NULL; in ice_xdp_alloc_setup_rings()
2621 ice_free_tx_ring(vsi->xdp_rings[i]); in ice_xdp_alloc_setup_rings()
2632 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) in ice_vsi_assign_bpf_prog() argument
2637 old_prog = xchg(&vsi->xdp_prog, prog); in ice_vsi_assign_bpf_prog()
2638 ice_for_each_rxq(vsi, i) in ice_vsi_assign_bpf_prog()
2639 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); in ice_vsi_assign_bpf_prog()
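
Note: ice_vsi_assign_bpf_prog() swaps the VSI-level program pointer atomically with xchg(), then publishes the new pointer to every Rx ring with WRITE_ONCE() so concurrent readers never observe a torn value. A standalone sketch of that swap-then-publish pattern with C11 atomics (the struct and ring count are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NUM_RXQ 4

    struct prog { int id; };

    static _Atomic(struct prog *) vsi_prog;
    static _Atomic(struct prog *) ring_prog[NUM_RXQ];

    /* Swap the VSI-wide program, then publish it per ring. */
    static struct prog *assign_prog(struct prog *new_prog)
    {
        struct prog *old = atomic_exchange(&vsi_prog, new_prog);

        for (int i = 0; i < NUM_RXQ; i++)
            atomic_store(&ring_prog[i], new_prog); /* WRITE_ONCE analogue */
        return old;     /* caller may release the old program */
    }

    int main(void)
    {
        struct prog p = { .id = 1 };
        struct prog *old = assign_prog(&p);

        printf("old=%p new=%d\n", (void *)old, p.id);
        return 0;
    }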
2653 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, in ice_prepare_xdp_rings() argument
2657 int xdp_rings_rem = vsi->num_xdp_txq; in ice_prepare_xdp_rings()
2658 struct ice_pf *pf = vsi->back; in ice_prepare_xdp_rings()
2663 .q_count = vsi->num_xdp_txq, in ice_prepare_xdp_rings()
2665 .vsi_map = vsi->txq_map, in ice_prepare_xdp_rings()
2666 .vsi_map_offset = vsi->alloc_txq, in ice_prepare_xdp_rings()
2674 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, in ice_prepare_xdp_rings()
2675 sizeof(*vsi->xdp_rings), GFP_KERNEL); in ice_prepare_xdp_rings()
2676 if (!vsi->xdp_rings) in ice_prepare_xdp_rings()
2679 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; in ice_prepare_xdp_rings()
2684 netdev_warn(vsi->netdev, in ice_prepare_xdp_rings()
2687 if (ice_xdp_alloc_setup_rings(vsi)) in ice_prepare_xdp_rings()
2691 ice_for_each_q_vector(vsi, v_idx) { in ice_prepare_xdp_rings()
2692 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; in ice_prepare_xdp_rings()
2696 vsi->num_q_vectors - v_idx); in ice_prepare_xdp_rings()
2697 q_base = vsi->num_xdp_txq - xdp_rings_rem; in ice_prepare_xdp_rings()
2700 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; in ice_prepare_xdp_rings()
2709 ice_for_each_rxq(vsi, i) { in ice_prepare_xdp_rings()
2711 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq]; in ice_prepare_xdp_rings()
2713 struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector; in ice_prepare_xdp_rings()
2718 vsi->rx_rings[i]->xdp_ring = ring; in ice_prepare_xdp_rings()
2723 ice_tx_xsk_pool(vsi, i); in ice_prepare_xdp_rings()
2736 for (i = 0; i < vsi->tc_cfg.numtc; i++) in ice_prepare_xdp_rings()
2737 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; in ice_prepare_xdp_rings()
2739 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, in ice_prepare_xdp_rings()
2756 if (!ice_is_xdp_ena_vsi(vsi)) in ice_prepare_xdp_rings()
2757 ice_vsi_assign_bpf_prog(vsi, prog); in ice_prepare_xdp_rings()
2761 ice_for_each_xdp_txq(vsi, i) in ice_prepare_xdp_rings()
2762 if (vsi->xdp_rings[i]) { in ice_prepare_xdp_rings()
2763 kfree_rcu(vsi->xdp_rings[i], rcu); in ice_prepare_xdp_rings()
2764 vsi->xdp_rings[i] = NULL; in ice_prepare_xdp_rings()
2769 ice_for_each_xdp_txq(vsi, i) { in ice_prepare_xdp_rings()
2770 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_prepare_xdp_rings()
2771 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; in ice_prepare_xdp_rings()
2775 devm_kfree(dev, vsi->xdp_rings); in ice_prepare_xdp_rings()
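
Note: the loop at lines 2691-2700 above distributes num_xdp_txq rings across the q_vectors by recomputing DIV_ROUND_UP(remaining rings, remaining vectors) at each step, which spreads any remainder evenly over the leading vectors. A self-contained sketch of that arithmetic:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int num_xdp_txq = 10, num_q_vectors = 4;
        int rem = num_xdp_txq;

        for (int v = 0; v < num_q_vectors; v++) {
            /* Spread the remaining rings evenly over the remaining vectors. */
            int per_v = DIV_ROUND_UP(rem, num_q_vectors - v);
            int base = num_xdp_txq - rem;

            printf("vector %d gets rings [%d..%d)\n", v, base, base + per_v);
            rem -= per_v;
        }
        return 0;       /* 10 rings over 4 vectors -> 3, 3, 2, 2 */
    }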
2787 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type) in ice_destroy_xdp_rings() argument
2790 struct ice_pf *pf = vsi->back; in ice_destroy_xdp_rings()
2799 ice_for_each_q_vector(vsi, v_idx) { in ice_destroy_xdp_rings()
2800 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; in ice_destroy_xdp_rings()
2813 ice_for_each_xdp_txq(vsi, i) { in ice_destroy_xdp_rings()
2814 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_destroy_xdp_rings()
2815 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; in ice_destroy_xdp_rings()
2819 ice_for_each_xdp_txq(vsi, i) in ice_destroy_xdp_rings()
2820 if (vsi->xdp_rings[i]) { in ice_destroy_xdp_rings()
2821 if (vsi->xdp_rings[i]->desc) { in ice_destroy_xdp_rings()
2823 ice_free_tx_ring(vsi->xdp_rings[i]); in ice_destroy_xdp_rings()
2825 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); in ice_destroy_xdp_rings()
2826 vsi->xdp_rings[i]->ring_stats = NULL; in ice_destroy_xdp_rings()
2827 kfree_rcu(vsi->xdp_rings[i], rcu); in ice_destroy_xdp_rings()
2828 vsi->xdp_rings[i] = NULL; in ice_destroy_xdp_rings()
2831 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); in ice_destroy_xdp_rings()
2832 vsi->xdp_rings = NULL; in ice_destroy_xdp_rings()
2840 ice_vsi_assign_bpf_prog(vsi, NULL); in ice_destroy_xdp_rings()
2845 for (i = 0; i < vsi->tc_cfg.numtc; i++) in ice_destroy_xdp_rings()
2846 max_txqs[i] = vsi->num_txq; in ice_destroy_xdp_rings()
2849 vsi->num_xdp_txq = 0; in ice_destroy_xdp_rings()
2851 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, in ice_destroy_xdp_rings()
2859 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi) in ice_vsi_rx_napi_schedule() argument
2863 ice_for_each_rxq(vsi, i) { in ice_vsi_rx_napi_schedule()
2864 struct ice_rx_ring *rx_ring = vsi->rx_rings[i]; in ice_vsi_rx_napi_schedule()
2878 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi) in ice_vsi_determine_xdp_res() argument
2880 u16 avail = ice_get_avail_txq_count(vsi->back); in ice_vsi_determine_xdp_res()
2886 vsi->num_xdp_txq = min_t(u16, avail, cpus); in ice_vsi_determine_xdp_res()
2888 if (vsi->num_xdp_txq < cpus) in ice_vsi_determine_xdp_res()
2898 static int ice_max_xdp_frame_size(struct ice_vsi *vsi) in ice_max_xdp_frame_size() argument
2900 if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) in ice_max_xdp_frame_size()
2913 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, in ice_xdp_setup_prog() argument
2916 unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; in ice_xdp_setup_prog()
2921 if (frame_size > ice_max_xdp_frame_size(vsi)) { in ice_xdp_setup_prog()
2929 if (ice_is_xdp_ena_vsi(vsi) == !!prog || in ice_xdp_setup_prog()
2930 test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) { in ice_xdp_setup_prog()
2931 ice_vsi_assign_bpf_prog(vsi, prog); in ice_xdp_setup_prog()
2935 if_running = netif_running(vsi->netdev) && in ice_xdp_setup_prog()
2936 !test_and_set_bit(ICE_VSI_DOWN, vsi->state); in ice_xdp_setup_prog()
2940 ret = ice_down(vsi); in ice_xdp_setup_prog()
2947 if (!ice_is_xdp_ena_vsi(vsi) && prog) { in ice_xdp_setup_prog()
2948 xdp_ring_err = ice_vsi_determine_xdp_res(vsi); in ice_xdp_setup_prog()
2952 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog, in ice_xdp_setup_prog()
2957 xdp_features_set_redirect_target(vsi->netdev, true); in ice_xdp_setup_prog()
2959 xdp_ring_err = ice_realloc_zc_buf(vsi, true); in ice_xdp_setup_prog()
2962 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { in ice_xdp_setup_prog()
2963 xdp_features_clear_redirect_target(vsi->netdev); in ice_xdp_setup_prog()
2964 xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL); in ice_xdp_setup_prog()
2968 xdp_ring_err = ice_realloc_zc_buf(vsi, false); in ice_xdp_setup_prog()
2974 ret = ice_up(vsi); in ice_xdp_setup_prog()
2977 ice_vsi_rx_napi_schedule(vsi); in ice_xdp_setup_prog()
3004 struct ice_vsi *vsi = np->vsi; in ice_xdp() local
3007 if (vsi->type != ICE_VSI_PF) { in ice_xdp()
3012 mutex_lock(&vsi->xdp_state_lock); in ice_xdp()
3016 ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); in ice_xdp()
3019 ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id); in ice_xdp()
3025 mutex_unlock(&vsi->xdp_state_lock); in ice_xdp()
3373 static void ice_napi_add(struct ice_vsi *vsi) in ice_napi_add() argument
3377 if (!vsi->netdev) in ice_napi_add()
3380 ice_for_each_q_vector(vsi, v_idx) in ice_napi_add()
3381 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, in ice_napi_add()
3389 static void ice_set_ops(struct ice_vsi *vsi) in ice_set_ops() argument
3391 struct net_device *netdev = vsi->netdev; in ice_set_ops()
3404 if (vsi->type != ICE_VSI_PF) in ice_set_ops()
3604 struct ice_vsi *vsi = np->vsi; in ice_vlan_rx_add_vid() local
3612 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) in ice_vlan_rx_add_vid()
3618 if (vsi->current_netdev_flags & IFF_ALLMULTI) { in ice_vlan_rx_add_vid()
3619 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_add_vid()
3626 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); in ice_vlan_rx_add_vid()
3632 ret = vlan_ops->add_vlan(vsi, &vlan); in ice_vlan_rx_add_vid()
3640 if ((vsi->current_netdev_flags & IFF_ALLMULTI) && in ice_vlan_rx_add_vid()
3641 ice_vsi_num_non_zero_vlans(vsi) == 1) { in ice_vlan_rx_add_vid()
3642 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_add_vid()
3644 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_add_vid()
3649 clear_bit(ICE_CFG_BUSY, vsi->state); in ice_vlan_rx_add_vid()
3667 struct ice_vsi *vsi = np->vsi; in ice_vlan_rx_kill_vid() local
3675 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) in ice_vlan_rx_kill_vid()
3678 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3682 vsi->vsi_num); in ice_vlan_rx_kill_vid()
3683 vsi->current_netdev_flags |= IFF_ALLMULTI; in ice_vlan_rx_kill_vid()
3686 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); in ice_vlan_rx_kill_vid()
3692 ret = vlan_ops->del_vlan(vsi, &vlan); in ice_vlan_rx_kill_vid()
3699 if (vsi->current_netdev_flags & IFF_ALLMULTI) in ice_vlan_rx_kill_vid()
3700 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3703 if (!ice_vsi_has_non_zero_vlans(vsi)) { in ice_vlan_rx_kill_vid()
3708 if (vsi->current_netdev_flags & IFF_ALLMULTI) { in ice_vlan_rx_kill_vid()
3709 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3712 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3718 clear_bit(ICE_CFG_BUSY, vsi->state); in ice_vlan_rx_kill_vid()
3739 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi) in ice_tc_indir_block_unregister() argument
3741 struct ice_netdev_priv *np = netdev_priv(vsi->netdev); in ice_tc_indir_block_unregister()
3753 static int ice_tc_indir_block_register(struct ice_vsi *vsi) in ice_tc_indir_block_register() argument
3757 if (!vsi || !vsi->netdev) in ice_tc_indir_block_register()
3760 np = netdev_priv(vsi->netdev); in ice_tc_indir_block_register()
3958 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) in ice_vsi_recfg_qs() argument
3960 struct ice_pf *pf = vsi->back; in ice_vsi_recfg_qs()
3974 vsi->req_txq = (u16)new_tx; in ice_vsi_recfg_qs()
3976 vsi->req_rxq = (u16)new_rx; in ice_vsi_recfg_qs()
3979 if (!netif_running(vsi->netdev)) { in ice_vsi_recfg_qs()
3980 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); in ice_vsi_recfg_qs()
3987 ice_vsi_close(vsi); in ice_vsi_recfg_qs()
3988 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); in ice_vsi_recfg_qs()
3993 if (vsi->tc_cfg.ena_tc & BIT(i)) in ice_vsi_recfg_qs()
3994 netdev_set_tc_queue(vsi->netdev, in ice_vsi_recfg_qs()
3995 vsi->tc_cfg.tc_info[i].netdev_tc, in ice_vsi_recfg_qs()
3996 vsi->tc_cfg.tc_info[i].qcount_tx, in ice_vsi_recfg_qs()
3997 vsi->tc_cfg.tc_info[i].qoffset); in ice_vsi_recfg_qs()
4000 ice_vsi_open(vsi); in ice_vsi_recfg_qs()
4020 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_set_safe_mode_vlan_cfg() local
4025 if (!vsi) in ice_set_safe_mode_vlan_cfg()
4033 ctxt->info = vsi->info; in ice_set_safe_mode_vlan_cfg()
4051 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_set_safe_mode_vlan_cfg()
4053 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", in ice_set_safe_mode_vlan_cfg()
4056 vsi->info.sec_flags = ctxt->info.sec_flags; in ice_set_safe_mode_vlan_cfg()
4057 vsi->info.sw_flags2 = ctxt->info.sw_flags2; in ice_set_safe_mode_vlan_cfg()
4058 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; in ice_set_safe_mode_vlan_cfg()
4260 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_init_fdir()
4268 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf); in ice_deinit_fdir() local
4270 if (!vsi) in ice_deinit_fdir()
4273 ice_vsi_manage_fdir(vsi, false); in ice_deinit_fdir()
4274 ice_vsi_release(vsi); in ice_deinit_fdir()
4276 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_deinit_fdir()
4385 static int ice_register_netdev(struct ice_vsi *vsi) in ice_register_netdev() argument
4389 if (!vsi || !vsi->netdev) in ice_register_netdev()
4392 err = register_netdev(vsi->netdev); in ice_register_netdev()
4396 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); in ice_register_netdev()
4397 netif_carrier_off(vsi->netdev); in ice_register_netdev()
4398 netif_tx_stop_all_queues(vsi->netdev); in ice_register_netdev()
4403 static void ice_unregister_netdev(struct ice_vsi *vsi) in ice_unregister_netdev() argument
4405 if (!vsi || !vsi->netdev) in ice_unregister_netdev()
4408 unregister_netdev(vsi->netdev); in ice_unregister_netdev()
4409 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); in ice_unregister_netdev()
4418 static int ice_cfg_netdev(struct ice_vsi *vsi) in ice_cfg_netdev() argument
4424 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, in ice_cfg_netdev()
4425 vsi->alloc_rxq); in ice_cfg_netdev()
4429 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); in ice_cfg_netdev()
4430 vsi->netdev = netdev; in ice_cfg_netdev()
4432 np->vsi = vsi; in ice_cfg_netdev()
4435 ice_set_ops(vsi); in ice_cfg_netdev()
4437 if (vsi->type == ICE_VSI_PF) { in ice_cfg_netdev()
4438 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); in ice_cfg_netdev()
4439 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); in ice_cfg_netdev()
4446 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); in ice_cfg_netdev()
4453 static void ice_decfg_netdev(struct ice_vsi *vsi) in ice_decfg_netdev() argument
4455 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); in ice_decfg_netdev()
4456 free_netdev(vsi->netdev); in ice_decfg_netdev()
4457 vsi->netdev = NULL; in ice_decfg_netdev()
4460 static int ice_start_eth(struct ice_vsi *vsi) in ice_start_eth() argument
4464 err = ice_init_mac_fltr(vsi->back); in ice_start_eth()
4468 err = ice_vsi_open(vsi); in ice_start_eth()
4470 ice_fltr_remove_all(vsi); in ice_start_eth()
4475 static void ice_stop_eth(struct ice_vsi *vsi) in ice_stop_eth() argument
4477 ice_fltr_remove_all(vsi); in ice_stop_eth()
4478 ice_vsi_close(vsi); in ice_stop_eth()
4483 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_init_eth() local
4486 if (!vsi) in ice_init_eth()
4490 INIT_LIST_HEAD(&vsi->ch_list); in ice_init_eth()
4492 err = ice_cfg_netdev(vsi); in ice_init_eth()
4496 ice_dcbnl_setup(vsi); in ice_init_eth()
4506 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); in ice_init_eth()
4508 err = ice_register_netdev(vsi); in ice_init_eth()
4512 err = ice_tc_indir_block_register(vsi); in ice_init_eth()
4516 ice_napi_add(vsi); in ice_init_eth()
4521 ice_unregister_netdev(vsi); in ice_init_eth()
4526 ice_decfg_netdev(vsi); in ice_init_eth()
4532 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_deinit_eth() local
4534 if (!vsi) in ice_deinit_eth()
4537 ice_vsi_close(vsi); in ice_deinit_eth()
4538 ice_unregister_netdev(vsi); in ice_deinit_eth()
4540 ice_tc_indir_block_unregister(vsi); in ice_deinit_eth()
4541 ice_decfg_netdev(vsi); in ice_deinit_eth()
4769 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_init_link() local
4771 if (vsi) in ice_init_link()
4772 ice_configure_phy(vsi); in ice_init_link()
4784 struct ice_vsi *vsi; in ice_init_pf_sw() local
4806 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); in ice_init_pf_sw()
4807 if (!vsi) { in ice_init_pf_sw()
4822 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_deinit_pf_sw() local
4824 if (!vsi) in ice_deinit_pf_sw()
4827 ice_vsi_release(vsi); in ice_deinit_pf_sw()
4846 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), in ice_alloc_vsis()
4848 if (!pf->vsi) in ice_alloc_vsis()
4854 devm_kfree(dev, pf->vsi); in ice_alloc_vsis()
4867 devm_kfree(ice_pf_to_dev(pf), pf->vsi); in ice_dealloc_vsis()
4868 pf->vsi = NULL; in ice_dealloc_vsis()
4961 struct ice_vsi *vsi; in ice_load() local
4968 vsi = ice_get_main_vsi(pf); in ice_load()
4970 params = ice_vsi_to_params(vsi); in ice_load()
4974 err = ice_vsi_cfg(vsi, &params); in ice_load()
5176 struct ice_vsi *vsi; in ice_setup_mc_magic_wake() local
5183 vsi = ice_get_main_vsi(pf); in ice_setup_mc_magic_wake()
5184 if (!vsi) in ice_setup_mc_magic_wake()
5188 if (vsi->netdev) in ice_setup_mc_magic_wake()
5189 ether_addr_copy(mac_addr, vsi->netdev->dev_addr); in ice_setup_mc_magic_wake()
5191 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); in ice_setup_mc_magic_wake()
5281 if (pf->vsi[v]) in ice_prepare_for_shutdown()
5282 pf->vsi[v]->vsi_num = 0; in ice_prepare_for_shutdown()
5314 if (!pf->vsi[v]) in ice_reinit_interrupt_scheme()
5317 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5320 ice_vsi_map_rings_to_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5334 if (pf->vsi[v]) in ice_reinit_interrupt_scheme()
5335 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5398 if (!pf->vsi[v]) in ice_suspend()
5400 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_suspend()
5725 struct ice_vsi *vsi = np->vsi; in ice_set_mac_address() local
5726 struct ice_pf *pf = vsi->back; in ice_set_mac_address()
5759 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); in ice_set_mac_address()
5766 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); in ice_set_mac_address()
5791 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", in ice_set_mac_address()
5811 struct ice_vsi *vsi = np->vsi; in ice_set_rx_mode() local
5813 if (!vsi || ice_is_switchdev_running(vsi->back)) in ice_set_rx_mode()
5820 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); in ice_set_rx_mode()
5821 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_set_rx_mode()
5822 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); in ice_set_rx_mode()
5827 ice_service_task_schedule(vsi->back); in ice_set_rx_mode()
5840 struct ice_vsi *vsi = np->vsi; in ice_set_tx_maxrate() local
5852 q_handle = vsi->tx_rings[queue_index]->q_handle; in ice_set_tx_maxrate()
5853 tc = ice_dcb_get_tc(vsi, queue_index); in ice_set_tx_maxrate()
5855 vsi = ice_locate_vsi_using_queue(vsi, queue_index); in ice_set_tx_maxrate()
5856 if (!vsi) { in ice_set_tx_maxrate()
5864 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, in ice_set_tx_maxrate()
5867 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, in ice_set_tx_maxrate()
6011 if (ice_is_dvm_ena(&np->vsi->back->hw)) { in ice_fix_features()
6044 !ice_vsi_has_non_zero_vlans(np->vsi)) { in ice_fix_features()
6062 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features) in ice_set_vlan_offload_features() argument
6069 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); in ice_set_vlan_offload_features()
6082 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype); in ice_set_vlan_offload_features()
6084 strip_err = vlan_ops->dis_stripping(vsi); in ice_set_vlan_offload_features()
6087 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype); in ice_set_vlan_offload_features()
6089 insert_err = vlan_ops->dis_insertion(vsi); in ice_set_vlan_offload_features()
6106 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features) in ice_set_vlan_filtering_features() argument
6108 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); in ice_set_vlan_filtering_features()
6116 err = vlan_ops->ena_rx_filtering(vsi); in ice_set_vlan_filtering_features()
6118 err = vlan_ops->dis_rx_filtering(vsi); in ice_set_vlan_filtering_features()
6136 struct ice_vsi *vsi = np->vsi; in ice_set_vlan_features() local
6144 dev_err(ice_pf_to_dev(vsi->back), in ice_set_vlan_features()
6149 err = ice_set_vlan_offload_features(vsi, features); in ice_set_vlan_features()
6158 err = ice_set_vlan_filtering_features(vsi, features); in ice_set_vlan_features()
6171 static int ice_set_loopback(struct ice_vsi *vsi, bool ena) in ice_set_loopback() argument
6173 bool if_running = netif_running(vsi->netdev); in ice_set_loopback()
6176 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { in ice_set_loopback()
6177 ret = ice_down(vsi); in ice_set_loopback()
6179 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n"); in ice_set_loopback()
6183 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); in ice_set_loopback()
6185 netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); in ice_set_loopback()
6187 ret = ice_up(vsi); in ice_set_loopback()
6202 struct ice_vsi *vsi = np->vsi; in ice_set_features() local
6203 struct ice_pf *pf = vsi->back; in ice_set_features()
6224 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH)); in ice_set_features()
6236 dev_err(ice_pf_to_dev(vsi->back), in ice_set_features()
6241 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS)); in ice_set_features()
6242 ret = ice_down_up(vsi); in ice_set_features()
6250 ice_vsi_manage_fdir(vsi, ena); in ice_set_features()
6251 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi); in ice_set_features()
6268 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK)); in ice_set_features()
6277 static int ice_vsi_vlan_setup(struct ice_vsi *vsi) in ice_vsi_vlan_setup() argument
6281 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features); in ice_vsi_vlan_setup()
6285 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features); in ice_vsi_vlan_setup()
6289 return ice_vsi_add_vlan_zero(vsi); in ice_vsi_vlan_setup()
6298 int ice_vsi_cfg_lan(struct ice_vsi *vsi) in ice_vsi_cfg_lan() argument
6302 if (vsi->netdev && vsi->type == ICE_VSI_PF) { in ice_vsi_cfg_lan()
6303 ice_set_rx_mode(vsi->netdev); in ice_vsi_cfg_lan()
6305 err = ice_vsi_vlan_setup(vsi); in ice_vsi_cfg_lan()
6309 ice_vsi_cfg_dcb_rings(vsi); in ice_vsi_cfg_lan()
6311 err = ice_vsi_cfg_lan_txqs(vsi); in ice_vsi_cfg_lan()
6312 if (!err && ice_is_xdp_ena_vsi(vsi)) in ice_vsi_cfg_lan()
6313 err = ice_vsi_cfg_xdp_txqs(vsi); in ice_vsi_cfg_lan()
6315 err = ice_vsi_cfg_rxqs(vsi); in ice_vsi_cfg_lan()
6444 static void ice_napi_enable_all(struct ice_vsi *vsi) in ice_napi_enable_all() argument
6448 if (!vsi->netdev) in ice_napi_enable_all()
6451 ice_for_each_q_vector(vsi, q_idx) { in ice_napi_enable_all()
6452 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; in ice_napi_enable_all()
6467 static int ice_up_complete(struct ice_vsi *vsi) in ice_up_complete() argument
6469 struct ice_pf *pf = vsi->back; in ice_up_complete()
6472 ice_vsi_cfg_msix(vsi); in ice_up_complete()
6478 err = ice_vsi_start_all_rx_rings(vsi); in ice_up_complete()
6482 clear_bit(ICE_VSI_DOWN, vsi->state); in ice_up_complete()
6483 ice_napi_enable_all(vsi); in ice_up_complete()
6484 ice_vsi_ena_irq(vsi); in ice_up_complete()
6486 if (vsi->port_info && in ice_up_complete()
6487 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && in ice_up_complete()
6488 vsi->netdev && vsi->type == ICE_VSI_PF) { in ice_up_complete()
6489 ice_print_link_msg(vsi, true); in ice_up_complete()
6490 netif_tx_start_all_queues(vsi->netdev); in ice_up_complete()
6491 netif_carrier_on(vsi->netdev); in ice_up_complete()
6498 ice_update_eth_stats(vsi); in ice_up_complete()
6500 if (vsi->type == ICE_VSI_PF) in ice_up_complete()
6510 int ice_up(struct ice_vsi *vsi) in ice_up() argument
6514 err = ice_vsi_cfg_lan(vsi); in ice_up()
6516 err = ice_up_complete(vsi); in ice_up()
6552 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, in ice_update_vsi_tx_ring_stats() argument
6570 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q; in ice_update_vsi_tx_ring_stats()
6571 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy; in ice_update_vsi_tx_ring_stats()
6572 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize; in ice_update_vsi_tx_ring_stats()
6580 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) in ice_update_vsi_ring_stats() argument
6584 struct ice_pf *pf = vsi->back; in ice_update_vsi_ring_stats()
6593 vsi->tx_restart = 0; in ice_update_vsi_ring_stats()
6594 vsi->tx_busy = 0; in ice_update_vsi_ring_stats()
6595 vsi->tx_linearize = 0; in ice_update_vsi_ring_stats()
6596 vsi->rx_buf_failed = 0; in ice_update_vsi_ring_stats()
6597 vsi->rx_page_failed = 0; in ice_update_vsi_ring_stats()
6602 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, in ice_update_vsi_ring_stats()
6603 vsi->num_txq); in ice_update_vsi_ring_stats()
6606 ice_for_each_rxq(vsi, i) { in ice_update_vsi_ring_stats()
6607 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]); in ice_update_vsi_ring_stats()
6616 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed; in ice_update_vsi_ring_stats()
6617 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed; in ice_update_vsi_ring_stats()
6621 if (ice_is_xdp_ena_vsi(vsi)) in ice_update_vsi_ring_stats()
6622 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, in ice_update_vsi_ring_stats()
6623 vsi->num_xdp_txq); in ice_update_vsi_ring_stats()
6627 net_stats = &vsi->net_stats; in ice_update_vsi_ring_stats()
6628 stats_prev = &vsi->net_stats_prev; in ice_update_vsi_ring_stats()
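
Note: ice_update_vsi_ring_stats() zeroes the VSI-level accumulators (lines 6593-6597 above), then folds each ring's counters in; Tx, XDP Tx, and Rx rings all funnel through the same walk. A trimmed-down sketch of that accumulation (the fields are illustrative):

    #include <stdio.h>

    struct ring_stats { unsigned long pkts, bytes; };

    int main(void)
    {
        struct ring_stats rings[3] = { {10, 1000}, {20, 2000}, {30, 3000} };
        unsigned long pkts = 0, bytes = 0;      /* zeroed before the walk */

        for (int i = 0; i < 3; i++) {
            pkts += rings[i].pkts;
            bytes += rings[i].bytes;
        }
        printf("vsi totals: %lu pkts, %lu bytes\n", pkts, bytes);
        return 0;
    }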
6654 void ice_update_vsi_stats(struct ice_vsi *vsi) in ice_update_vsi_stats() argument
6656 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; in ice_update_vsi_stats()
6657 struct ice_eth_stats *cur_es = &vsi->eth_stats; in ice_update_vsi_stats()
6658 struct ice_pf *pf = vsi->back; in ice_update_vsi_stats()
6660 if (test_bit(ICE_VSI_DOWN, vsi->state) || in ice_update_vsi_stats()
6665 ice_update_vsi_ring_stats(vsi); in ice_update_vsi_stats()
6668 ice_update_eth_stats(vsi); in ice_update_vsi_stats()
6676 if (vsi->type == ICE_VSI_PF) { in ice_update_vsi_stats()
6856 struct ice_vsi *vsi = np->vsi; in ice_get_stats64() local
6858 vsi_stats = &vsi->net_stats; in ice_get_stats64()
6860 if (!vsi->num_txq || !vsi->num_rxq) in ice_get_stats64()
6868 if (!test_bit(ICE_VSI_DOWN, vsi->state)) in ice_get_stats64()
6869 ice_update_vsi_ring_stats(vsi); in ice_get_stats64()
6892 static void ice_napi_disable_all(struct ice_vsi *vsi) in ice_napi_disable_all() argument
6896 if (!vsi->netdev) in ice_napi_disable_all()
6899 ice_for_each_q_vector(vsi, q_idx) { in ice_napi_disable_all()
6900 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; in ice_napi_disable_all()
6916 int ice_down(struct ice_vsi *vsi) in ice_down() argument
6920 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); in ice_down()
6922 if (vsi->netdev && vsi->type == ICE_VSI_PF) { in ice_down()
6923 vlan_err = ice_vsi_del_vlan_zero(vsi); in ice_down()
6924 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); in ice_down()
6925 netif_carrier_off(vsi->netdev); in ice_down()
6926 netif_tx_disable(vsi->netdev); in ice_down()
6927 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { in ice_down()
6928 ice_eswitch_stop_all_tx_queues(vsi->back); in ice_down()
6931 ice_vsi_dis_irq(vsi); in ice_down()
6933 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); in ice_down()
6935 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", in ice_down()
6936 vsi->vsi_num, tx_err); in ice_down()
6937 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) { in ice_down()
6938 tx_err = ice_vsi_stop_xdp_tx_rings(vsi); in ice_down()
6940 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", in ice_down()
6941 vsi->vsi_num, tx_err); in ice_down()
6944 rx_err = ice_vsi_stop_all_rx_rings(vsi); in ice_down()
6946 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", in ice_down()
6947 vsi->vsi_num, rx_err); in ice_down()
6949 ice_napi_disable_all(vsi); in ice_down()
6951 ice_for_each_txq(vsi, i) in ice_down()
6952 ice_clean_tx_ring(vsi->tx_rings[i]); in ice_down()
6954 if (ice_is_xdp_ena_vsi(vsi)) in ice_down()
6955 ice_for_each_xdp_txq(vsi, i) in ice_down()
6956 ice_clean_tx_ring(vsi->xdp_rings[i]); in ice_down()
6958 ice_for_each_rxq(vsi, i) in ice_down()
6959 ice_clean_rx_ring(vsi->rx_rings[i]); in ice_down()
6962 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", in ice_down()
6963 vsi->vsi_num, vsi->vsw->sw_id); in ice_down()
6974 int ice_down_up(struct ice_vsi *vsi) in ice_down_up() argument
6979 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state)) in ice_down_up()
6982 ret = ice_down(vsi); in ice_down_up()
6986 ret = ice_up(vsi); in ice_down_up()
6988 …netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to … in ice_down_up()
7001 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) in ice_vsi_setup_tx_rings() argument
7005 if (!vsi->num_txq) { in ice_vsi_setup_tx_rings()
7006 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", in ice_vsi_setup_tx_rings()
7007 vsi->vsi_num); in ice_vsi_setup_tx_rings()
7011 ice_for_each_txq(vsi, i) { in ice_vsi_setup_tx_rings()
7012 struct ice_tx_ring *ring = vsi->tx_rings[i]; in ice_vsi_setup_tx_rings()
7017 if (vsi->netdev) in ice_vsi_setup_tx_rings()
7018 ring->netdev = vsi->netdev; in ice_vsi_setup_tx_rings()
7033 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) in ice_vsi_setup_rx_rings() argument
7037 if (!vsi->num_rxq) { in ice_vsi_setup_rx_rings()
7038 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", in ice_vsi_setup_rx_rings()
7039 vsi->vsi_num); in ice_vsi_setup_rx_rings()
7043 ice_for_each_rxq(vsi, i) { in ice_vsi_setup_rx_rings()
7044 struct ice_rx_ring *ring = vsi->rx_rings[i]; in ice_vsi_setup_rx_rings()
7049 if (vsi->netdev) in ice_vsi_setup_rx_rings()
7050 ring->netdev = vsi->netdev; in ice_vsi_setup_rx_rings()
7067 int ice_vsi_open_ctrl(struct ice_vsi *vsi) in ice_vsi_open_ctrl() argument
7070 struct ice_pf *pf = vsi->back; in ice_vsi_open_ctrl()
7076 err = ice_vsi_setup_tx_rings(vsi); in ice_vsi_open_ctrl()
7080 err = ice_vsi_setup_rx_rings(vsi); in ice_vsi_open_ctrl()
7084 err = ice_vsi_cfg_lan(vsi); in ice_vsi_open_ctrl()
7090 err = ice_vsi_req_irq_msix(vsi, int_name); in ice_vsi_open_ctrl()
7094 ice_vsi_cfg_msix(vsi); in ice_vsi_open_ctrl()
7096 err = ice_vsi_start_all_rx_rings(vsi); in ice_vsi_open_ctrl()
7100 clear_bit(ICE_VSI_DOWN, vsi->state); in ice_vsi_open_ctrl()
7101 ice_vsi_ena_irq(vsi); in ice_vsi_open_ctrl()
7106 ice_down(vsi); in ice_vsi_open_ctrl()
7108 ice_vsi_free_rx_rings(vsi); in ice_vsi_open_ctrl()
7110 ice_vsi_free_tx_rings(vsi); in ice_vsi_open_ctrl()
7123 int ice_vsi_open(struct ice_vsi *vsi) in ice_vsi_open() argument
7126 struct ice_pf *pf = vsi->back; in ice_vsi_open()
7130 err = ice_vsi_setup_tx_rings(vsi); in ice_vsi_open()
7134 err = ice_vsi_setup_rx_rings(vsi); in ice_vsi_open()
7138 err = ice_vsi_cfg_lan(vsi); in ice_vsi_open()
7143 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); in ice_vsi_open()
7144 err = ice_vsi_req_irq_msix(vsi, int_name); in ice_vsi_open()
7148 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); in ice_vsi_open()
7150 if (vsi->type == ICE_VSI_PF) { in ice_vsi_open()
7152 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); in ice_vsi_open()
7156 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); in ice_vsi_open()
7161 err = ice_up_complete(vsi); in ice_vsi_open()
7168 ice_down(vsi); in ice_vsi_open()
7170 ice_vsi_free_irq(vsi); in ice_vsi_open()
7172 ice_vsi_free_rx_rings(vsi); in ice_vsi_open()
7174 ice_vsi_free_tx_rings(vsi); in ice_vsi_open()
7187 if (!pf->vsi) in ice_vsi_release_all()
7191 if (!pf->vsi[i]) in ice_vsi_release_all()
7194 if (pf->vsi[i]->type == ICE_VSI_CHNL) in ice_vsi_release_all()
7197 err = ice_vsi_release(pf->vsi[i]); in ice_vsi_release_all()
7200 i, err, pf->vsi[i]->vsi_num); in ice_vsi_release_all()
7217 struct ice_vsi *vsi = pf->vsi[i]; in ice_vsi_rebuild_by_type() local
7219 if (!vsi || vsi->type != type) in ice_vsi_rebuild_by_type()
7223 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT); in ice_vsi_rebuild_by_type()
7226 err, vsi->idx, ice_vsi_type_str(type)); in ice_vsi_rebuild_by_type()
7231 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7234 err, vsi->idx, ice_vsi_type_str(type)); in ice_vsi_rebuild_by_type()
7241 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7244 err = ice_ena_vsi(vsi, false); in ice_vsi_rebuild_by_type()
7247 err, vsi->idx, ice_vsi_type_str(type)); in ice_vsi_rebuild_by_type()
7251 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx, in ice_vsi_rebuild_by_type()
7268 struct ice_vsi *vsi = pf->vsi[i]; in ice_update_pf_netdev_link() local
7270 if (!vsi || vsi->type != ICE_VSI_PF) in ice_update_pf_netdev_link()
7273 ice_get_link_status(pf->vsi[i]->port_info, &link_up); in ice_update_pf_netdev_link()
7275 netif_carrier_on(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7276 netif_tx_wake_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7278 netif_carrier_off(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7279 netif_tx_stop_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7296 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_rebuild() local
7449 if (vsi && vsi->netdev) in ice_rebuild()
7450 netif_device_attach(vsi->netdev); in ice_rebuild()
7494 struct ice_vsi *vsi = np->vsi; in ice_change_mtu() local
7495 struct ice_pf *pf = vsi->back; in ice_change_mtu()
7505 prog = vsi->xdp_prog; in ice_change_mtu()
7507 int frame_size = ice_max_xdp_frame_size(vsi); in ice_change_mtu()
7539 err = ice_down_up(vsi); in ice_change_mtu()
7558 struct ice_pf *pf = np->vsi->back; in ice_eth_ioctl()
7620 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) in ice_set_rss_lut() argument
7623 struct ice_hw *hw = &vsi->back->hw; in ice_set_rss_lut()
7629 params.vsi_handle = vsi->idx; in ice_set_rss_lut()
7631 params.lut_type = vsi->rss_lut_type; in ice_set_rss_lut()
7636 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n", in ice_set_rss_lut()
7649 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) in ice_set_rss_key() argument
7651 struct ice_hw *hw = &vsi->back->hw; in ice_set_rss_key()
7657 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); in ice_set_rss_key()
7659 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n", in ice_set_rss_key()
7673 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) in ice_get_rss_lut() argument
7676 struct ice_hw *hw = &vsi->back->hw; in ice_get_rss_lut()
7682 params.vsi_handle = vsi->idx; in ice_get_rss_lut()
7684 params.lut_type = vsi->rss_lut_type; in ice_get_rss_lut()
7689 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n", in ice_get_rss_lut()
7702 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) in ice_get_rss_key() argument
7704 struct ice_hw *hw = &vsi->back->hw; in ice_get_rss_key()
7710 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); in ice_get_rss_key()
7712 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n", in ice_get_rss_key()
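
Note: the four RSS helpers above only push or pull an already-built LUT and key. Before a setter like ice_set_rss_lut() is called, a LUT is typically populated round-robin across the active Rx queues; a minimal sketch of that common default (table size and queue count are illustrative):

    #include <stdio.h>

    #define LUT_SIZE 64

    int main(void)
    {
        unsigned char lut[LUT_SIZE];
        int num_rxq = 6;

        /* Spread queue indices round-robin across the table, the usual
         * default before a driver pushes the LUT to hardware. */
        for (int i = 0; i < LUT_SIZE; i++)
            lut[i] = i % num_rxq;

        for (int i = 0; i < 8; i++)
            printf("lut[%d] = %u\n", i, lut[i]);
        return 0;
    }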
7734 struct ice_vsi *vsi = np->vsi; in ice_bridge_getlink() local
7735 struct ice_pf *pf = vsi->back; in ice_bridge_getlink()
7751 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) in ice_vsi_update_bridge_mode() argument
7754 struct ice_hw *hw = &vsi->back->hw; in ice_vsi_update_bridge_mode()
7758 vsi_props = &vsi->info; in ice_vsi_update_bridge_mode()
7764 ctxt->info = vsi->info; in ice_vsi_update_bridge_mode()
7774 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_update_bridge_mode()
7776 …dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\… in ice_vsi_update_bridge_mode()
7806 struct ice_pf *pf = np->vsi->back; in ice_bridge_setlink()
7833 if (!pf->vsi[v]) in ice_bridge_setlink()
7835 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); in ice_bridge_setlink()
7869 struct ice_vsi *vsi = np->vsi; in ice_tx_timeout() local
7870 struct ice_pf *pf = vsi->back; in ice_tx_timeout()
7886 ice_for_each_txq(vsi, i) in ice_tx_timeout()
7887 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in ice_tx_timeout()
7888 if (txqueue == vsi->tx_rings[i]->q_index) { in ice_tx_timeout()
7889 tx_ring = vsi->tx_rings[i]; in ice_tx_timeout()
7906 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) & in ice_tx_timeout()
7912 vsi->vsi_num, txqueue, tx_ring->next_to_clean, in ice_tx_timeout()
7933 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_tx_timeout()
7953 struct ice_vsi *vsi = np->vsi; in ice_setup_tc_cls_flower() local
7960 return ice_add_cls_flower(filter_dev, vsi, cls_flower); in ice_setup_tc_cls_flower()
7962 return ice_del_cls_flower(vsi, cls_flower); in ice_setup_tc_cls_flower()
7981 return ice_setup_tc_cls_flower(np, np->vsi->netdev, in ice_setup_tc_block_cb()
7998 ice_validate_mqprio_qopt(struct ice_vsi *vsi, in ice_validate_mqprio_qopt() argument
8002 struct ice_pf *pf = vsi->back; in ice_validate_mqprio_qopt()
8009 if (vsi->type != ICE_VSI_PF) in ice_validate_mqprio_qopt()
8018 vsi->ch_rss_size = 0; in ice_validate_mqprio_qopt()
8020 speed = ice_get_link_speed_kbps(vsi); in ice_validate_mqprio_qopt()
8109 if (vsi->num_rxq < in ice_validate_mqprio_qopt()
8112 if (vsi->num_txq < in ice_validate_mqprio_qopt()
8123 vsi->ch_rss_size = max_rss_q_cnt; in ice_validate_mqprio_qopt()
8133 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) in ice_add_vsi_to_fdir() argument
8140 if (!(vsi->num_gfltr || vsi->num_bfltr)) in ice_add_vsi_to_fdir()
8162 prof->vsi_h[0], vsi->idx, in ice_add_vsi_to_fdir()
8167 vsi->idx, flow); in ice_add_vsi_to_fdir()
8175 prof->vsi_h[prof->cnt] = vsi->idx; in ice_add_vsi_to_fdir()
8179 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx, in ice_add_vsi_to_fdir()
8184 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx); in ice_add_vsi_to_fdir()
8200 struct ice_vsi *vsi; in ice_add_channel() local
8207 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch); in ice_add_channel()
8208 if (!vsi || vsi->type != ICE_VSI_CHNL) { in ice_add_channel()
8213 ice_add_vsi_to_fdir(pf, vsi); in ice_add_channel()
8216 ch->vsi_num = vsi->vsi_num; in ice_add_channel()
8217 ch->info.mapping_flags = vsi->info.mapping_flags; in ice_add_channel()
8218 ch->ch_vsi = vsi; in ice_add_channel()
8220 vsi->ch = ch; in ice_add_channel()
8222 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping, in ice_add_channel()
8223 sizeof(vsi->info.q_mapping)); in ice_add_channel()
8224 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping, in ice_add_channel()
8225 sizeof(vsi->info.tc_mapping)); in ice_add_channel()
8237 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch) in ice_chnl_cfg_res() argument
8247 tx_ring = vsi->tx_rings[ch->base_q + i]; in ice_chnl_cfg_res()
8248 rx_ring = vsi->rx_rings[ch->base_q + i]; in ice_chnl_cfg_res()
8283 ice_flush(&vsi->back->hw); in ice_chnl_cfg_res()
8295 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch) in ice_cfg_chnl_all_res() argument
8300 ice_chnl_cfg_res(vsi, ch); in ice_cfg_chnl_all_res()
8315 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi, in ice_setup_hw_channel() argument
8321 ch->base_q = vsi->next_base_q; in ice_setup_hw_channel()
8331 ice_cfg_chnl_all_res(vsi, ch); in ice_setup_hw_channel()
8336 vsi->next_base_q = vsi->next_base_q + ch->num_rxq; in ice_setup_hw_channel()
8353 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi, in ice_setup_channel() argument
8360 if (vsi->type != ICE_VSI_PF) { in ice_setup_channel()
8361 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type); in ice_setup_channel()
8368 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL); in ice_setup_channel()
8385 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate) in ice_set_bw_limit() argument
8389 err = ice_set_min_bw_limit(vsi, min_tx_rate); in ice_set_bw_limit()
8393 return ice_set_max_bw_limit(vsi, max_tx_rate); in ice_set_bw_limit()
8404 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch) in ice_create_q_channel() argument
8406 struct ice_pf *pf = vsi->back; in ice_create_q_channel()
8418 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) { in ice_create_q_channel()
8420 vsi->cnt_q_avail, ch->num_txq); in ice_create_q_channel()
8424 if (!ice_setup_channel(pf, vsi, ch)) { in ice_create_q_channel()
8442 vsi->cnt_q_avail -= ch->num_txq; in ice_create_q_channel()
8505 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr) in ice_remove_q_channels() argument
8508 struct ice_pf *pf = vsi->back; in ice_remove_q_channels()
8516 if (vsi->netdev->features & NETIF_F_NTUPLE) { in ice_remove_q_channels()
8520 ice_fdir_del_all_fltrs(vsi); in ice_remove_q_channels()
8525 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in ice_remove_q_channels()
8540 tx_ring = vsi->tx_rings[ch->base_q + i]; in ice_remove_q_channels()
8541 rx_ring = vsi->rx_rings[ch->base_q + i]; in ice_remove_q_channels()
8569 vsi->tc_map_vsi[i] = NULL; in ice_remove_q_channels()
8572 vsi->all_enatc = 0; in ice_remove_q_channels()
8573 vsi->all_numtc = 0; in ice_remove_q_channels()
8588 struct ice_vsi *vsi; in ice_rebuild_channels() local
8614 vsi = pf->vsi[i]; in ice_rebuild_channels()
8615 if (!vsi || vsi->type != ICE_VSI_CHNL) in ice_rebuild_channels()
8618 type = vsi->type; in ice_rebuild_channels()
8621 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT); in ice_rebuild_channels()
8624 ice_vsi_type_str(type), vsi->idx, err); in ice_rebuild_channels()
8631 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_rebuild_channels()
8634 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_rebuild_channels()
8637 ice_vsi_type_str(type), err, vsi->idx); in ice_rebuild_channels()
8642 ice_vsi_type_str(type), vsi->idx); in ice_rebuild_channels()
8647 main_vsi->tc_map_vsi[tc_idx++] = vsi; in ice_rebuild_channels()
8696 static int ice_create_q_channels(struct ice_vsi *vsi) in ice_create_q_channels() argument
8698 struct ice_pf *pf = vsi->back; in ice_create_q_channels()
8703 if (!(vsi->all_enatc & BIT(i))) in ice_create_q_channels()
8712 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i]; in ice_create_q_channels()
8713 ch->num_txq = vsi->mqprio_qopt.qopt.count[i]; in ice_create_q_channels()
8714 ch->base_q = vsi->mqprio_qopt.qopt.offset[i]; in ice_create_q_channels()
8715 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i]; in ice_create_q_channels()
8716 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i]; in ice_create_q_channels()
8726 ret = ice_create_q_channel(vsi, ch); in ice_create_q_channels()
8733 list_add_tail(&ch->list, &vsi->ch_list); in ice_create_q_channels()
8734 vsi->tc_map_vsi[i] = ch->ch_vsi; in ice_create_q_channels()
8741 ice_remove_q_channels(vsi, false); in ice_create_q_channels()
8755 struct ice_vsi *vsi = np->vsi; in ice_setup_tc_mqprio_qdisc() local
8756 struct ice_pf *pf = vsi->back; in ice_setup_tc_mqprio_qdisc()
8769 vsi->ch_rss_size = 0; in ice_setup_tc_mqprio_qdisc()
8770 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); in ice_setup_tc_mqprio_qdisc()
8787 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt); in ice_setup_tc_mqprio_qdisc()
8793 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); in ice_setup_tc_mqprio_qdisc()
8799 if (vsi->netdev->features & NETIF_F_HW_TC) in ice_setup_tc_mqprio_qdisc()
8809 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc && in ice_setup_tc_mqprio_qdisc()
8814 ice_dis_vsi(vsi, true); in ice_setup_tc_mqprio_qdisc()
8817 ice_remove_q_channels(vsi, true); in ice_setup_tc_mqprio_qdisc()
8820 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf), in ice_setup_tc_mqprio_qdisc()
8822 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf), in ice_setup_tc_mqprio_qdisc()
8832 offset = vsi->mqprio_qopt.qopt.offset[i]; in ice_setup_tc_mqprio_qdisc()
8833 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; in ice_setup_tc_mqprio_qdisc()
8834 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; in ice_setup_tc_mqprio_qdisc()
8836 vsi->req_txq = offset + qcount_tx; in ice_setup_tc_mqprio_qdisc()
8837 vsi->req_rxq = offset + qcount_rx; in ice_setup_tc_mqprio_qdisc()
8843 vsi->orig_rss_size = vsi->rss_size; in ice_setup_tc_mqprio_qdisc()
8849 cur_txq = vsi->num_txq; in ice_setup_tc_mqprio_qdisc()
8850 cur_rxq = vsi->num_rxq; in ice_setup_tc_mqprio_qdisc()
8853 ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); in ice_setup_tc_mqprio_qdisc()
8857 vsi->req_txq = cur_txq; in ice_setup_tc_mqprio_qdisc()
8858 vsi->req_rxq = cur_rxq; in ice_setup_tc_mqprio_qdisc()
8860 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) { in ice_setup_tc_mqprio_qdisc()
8866 vsi->all_numtc = num_tcf; in ice_setup_tc_mqprio_qdisc()
8867 vsi->all_enatc = ena_tc_qdisc; in ice_setup_tc_mqprio_qdisc()
8868 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc); in ice_setup_tc_mqprio_qdisc()
8871 vsi->vsi_num); in ice_setup_tc_mqprio_qdisc()
8876 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; in ice_setup_tc_mqprio_qdisc()
8877 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0]; in ice_setup_tc_mqprio_qdisc()
8887 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate); in ice_setup_tc_mqprio_qdisc()
8890 max_tx_rate, min_tx_rate, vsi->vsi_num); in ice_setup_tc_mqprio_qdisc()
8893 max_tx_rate, min_tx_rate, vsi->vsi_num); in ice_setup_tc_mqprio_qdisc()
8897 ret = ice_create_q_channels(vsi); in ice_setup_tc_mqprio_qdisc()
8906 if (vsi->ch_rss_size) in ice_setup_tc_mqprio_qdisc()
8907 ice_vsi_cfg_rss_lut_key(vsi); in ice_setup_tc_mqprio_qdisc()
8912 vsi->all_numtc = 0; in ice_setup_tc_mqprio_qdisc()
8913 vsi->all_enatc = 0; in ice_setup_tc_mqprio_qdisc()
8916 ice_ena_vsi(vsi, true); in ice_setup_tc_mqprio_qdisc()
8928 struct ice_pf *pf = np->vsi->back; in ice_setup_tc()
9015 vlan_dev_real_dev(netdev) == np->vsi->netdev)) in ice_indr_setup_tc_block()
9102 struct ice_pf *pf = np->vsi->back; in ice_open()
9124 struct ice_vsi *vsi = np->vsi; in ice_open_internal() local
9125 struct ice_pf *pf = vsi->back; in ice_open_internal()
9136 pi = vsi->port_info; in ice_open_internal()
9157 err = ice_configure_phy(vsi); in ice_open_internal()
9165 ice_set_link(vsi, false); in ice_open_internal()
9168 err = ice_vsi_open(vsi); in ice_open_internal()
9171 vsi->vsi_num, vsi->vsw->sw_id); in ice_open_internal()
9192 struct ice_vsi *vsi = np->vsi; in ice_stop() local
9193 struct ice_pf *pf = vsi->back; in ice_stop()
9200 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { in ice_stop()
9201 int link_err = ice_force_phys_link_state(vsi, false); in ice_stop()
9205 netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n", in ice_stop()
9206 vsi->vsi_num); in ice_stop()
9208 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", in ice_stop()
9209 vsi->vsi_num, link_err); in ice_stop()
9211 ice_vsi_close(vsi); in ice_stop()
9216 ice_vsi_close(vsi); in ice_stop()