Lines Matching refs:vsi (drivers/net/ethernet/intel/i40e/i40e_main.c, Intel i40e Ethernet driver)
34 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
36 static int i40e_add_vsi(struct i40e_vsi *vsi);
37 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
238 if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) { in i40e_get_lump()
316 if (pf->vsi[i] && (pf->vsi[i]->id == id)) in i40e_find_vsi_from_id()
317 return pf->vsi[i]; in i40e_find_vsi_from_id()
348 struct i40e_vsi *vsi = np->vsi; in i40e_tx_timeout() local
349 struct i40e_pf *pf = vsi->back; in i40e_tx_timeout()
357 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_tx_timeout()
358 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { in i40e_tx_timeout()
360 vsi->tx_rings[i]->queue_index) { in i40e_tx_timeout()
361 tx_ring = vsi->tx_rings[i]; in i40e_tx_timeout()
383 tx_ring->vsi->base_vector - 1)); in i40e_tx_timeout()
388 vsi->seid, txqueue, tx_ring->next_to_clean, in i40e_tx_timeout()
410 set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state); in i40e_tx_timeout()
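
The i40e_tx_timeout() matches above show the handler scanning vsi->tx_rings[] for the ring whose queue_index matches the stalled netdev queue before requesting a VSI down. A minimal userspace sketch of that scan, using hypothetical stand-in types rather than the driver's real structures:

    #include <stddef.h>

    struct ring { unsigned int queue_index; void *desc; };
    struct vsi  { struct ring **tx_rings; unsigned int num_queue_pairs; };

    /* Return the Tx ring backing netdev queue 'txqueue', or NULL. */
    static struct ring *find_hung_ring(struct vsi *vsi, unsigned int txqueue)
    {
            for (unsigned int i = 0; i < vsi->num_queue_pairs; i++) {
                    struct ring *r = vsi->tx_rings[i];

                    if (r && r->desc && r->queue_index == txqueue)
                            return r;
            }
            return NULL; /* no allocated ring matches this queue */
    }
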
425 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) in i40e_get_vsi_stats_struct() argument
427 return &vsi->net_stats; in i40e_get_vsi_stats_struct()
463 struct i40e_vsi *vsi = np->vsi; in i40e_get_netdev_stats_struct() local
464 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); in i40e_get_netdev_stats_struct()
468 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_get_netdev_stats_struct()
471 if (!vsi->tx_rings) in i40e_get_netdev_stats_struct()
475 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_get_netdev_stats_struct()
479 ring = READ_ONCE(vsi->tx_rings[i]); in i40e_get_netdev_stats_struct()
484 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_get_netdev_stats_struct()
485 ring = READ_ONCE(vsi->xdp_rings[i]); in i40e_get_netdev_stats_struct()
491 ring = READ_ONCE(vsi->rx_rings[i]); in i40e_get_netdev_stats_struct()
520 void i40e_vsi_reset_stats(struct i40e_vsi *vsi) in i40e_vsi_reset_stats() argument
525 if (!vsi) in i40e_vsi_reset_stats()
528 ns = i40e_get_vsi_stats_struct(vsi); in i40e_vsi_reset_stats()
530 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets)); in i40e_vsi_reset_stats()
531 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats)); in i40e_vsi_reset_stats()
532 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); in i40e_vsi_reset_stats()
533 if (vsi->rx_rings && vsi->rx_rings[0]) { in i40e_vsi_reset_stats()
534 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_reset_stats()
535 memset(&vsi->rx_rings[i]->stats, 0, in i40e_vsi_reset_stats()
536 sizeof(vsi->rx_rings[i]->stats)); in i40e_vsi_reset_stats()
537 memset(&vsi->rx_rings[i]->rx_stats, 0, in i40e_vsi_reset_stats()
538 sizeof(vsi->rx_rings[i]->rx_stats)); in i40e_vsi_reset_stats()
539 memset(&vsi->tx_rings[i]->stats, 0, in i40e_vsi_reset_stats()
540 sizeof(vsi->tx_rings[i]->stats)); in i40e_vsi_reset_stats()
541 memset(&vsi->tx_rings[i]->tx_stats, 0, in i40e_vsi_reset_stats()
542 sizeof(vsi->tx_rings[i]->tx_stats)); in i40e_vsi_reset_stats()
545 vsi->stat_offsets_loaded = false; in i40e_vsi_reset_stats()
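
i40e_vsi_reset_stats() zeroes the aggregate offset blocks and every ring's counters, then clears stat_offsets_loaded so the next hardware read re-baselines. A compressed sketch of the same idiom, again assuming simplified stand-in types:

    #include <string.h>

    struct ring_stats { unsigned long long packets, bytes; };
    struct ring       { struct ring_stats stats; };
    struct vsi {
            struct ring **rx_rings, **tx_rings;
            unsigned int num_queue_pairs;
            struct ring_stats net_stats_offsets; /* aggregate baseline */
            int stat_offsets_loaded;
    };

    static void vsi_reset_stats(struct vsi *vsi)
    {
            if (!vsi)
                    return;
            memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
            for (unsigned int i = 0; i < vsi->num_queue_pairs; i++) {
                    memset(&vsi->rx_rings[i]->stats, 0,
                           sizeof(vsi->rx_rings[i]->stats));
                    memset(&vsi->tx_rings[i]->stats, 0,
                           sizeof(vsi->tx_rings[i]->stats));
            }
            /* force the next stats update to capture fresh HW baselines */
            vsi->stat_offsets_loaded = 0;
    }
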
581 static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw) in i40e_compute_pci_to_hw_id() argument
585 if (vsi->type == I40E_VSI_SRIOV) in i40e_compute_pci_to_hw_id()
586 return (hw->port * BIT(7)) / pf_count + vsi->vf_id; in i40e_compute_pci_to_hw_id()
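
In the SR-IOV branch of i40e_compute_pci_to_hw_id() above, each of pf_count ports gets an equal slice of the 128 (BIT(7)) hardware IDs, and the VF's own id is added within that slice: with four PFs, port 1's slice starts at 1 * 128 / 4 = 32, so VF 3 on that port maps to hardware id 35.
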
698 i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw, in i40e_stats_update_rx_discards() argument
708 I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)), in i40e_stats_update_rx_discards()
709 I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)), in i40e_stats_update_rx_discards()
720 void i40e_update_eth_stats(struct i40e_vsi *vsi) in i40e_update_eth_stats() argument
722 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx); in i40e_update_eth_stats()
723 struct i40e_pf *pf = vsi->back; in i40e_update_eth_stats()
728 es = &vsi->eth_stats; in i40e_update_eth_stats()
729 oes = &vsi->eth_stats_offsets; in i40e_update_eth_stats()
733 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
736 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
739 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
744 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
748 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
752 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
756 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
761 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
765 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
769 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
773 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
776 i40e_stats_update_rx_discards(vsi, hw, stat_idx, in i40e_update_eth_stats()
777 vsi->stat_offsets_loaded, oes, es); in i40e_update_eth_stats()
779 vsi->stat_offsets_loaded = true; in i40e_update_eth_stats()
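
The repeated stat_offsets_loaded arguments in i40e_update_eth_stats() drive an offset-relative counter scheme: the first read of a hardware counter is stored as a baseline, and later reads report the delta, with wrap handling for counters narrower than 64 bits. A sketch of that scheme for a 32-bit counter (the function name here is illustrative; the driver's own helpers are the i40e_stat_update* family):

    #include <stdbool.h>
    #include <stdint.h>

    /* Fold a raw 32-bit HW counter into a monotonic 64-bit stat. */
    static void stat_update32(uint32_t raw, bool offset_loaded,
                              uint64_t *offset, uint64_t *stat)
    {
            if (!offset_loaded)
                    *offset = raw; /* first read: record the baseline only */
            if (raw >= *offset)
                    *stat = raw - *offset;
            else /* the 32-bit hardware counter wrapped past the baseline */
                    *stat = (raw + ((uint64_t)1 << 32)) - *offset;
    }
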
871 static void i40e_update_vsi_stats(struct i40e_vsi *vsi) in i40e_update_vsi_stats() argument
874 struct i40e_pf *pf = vsi->back; in i40e_update_vsi_stats()
890 if (test_bit(__I40E_VSI_DOWN, vsi->state) || in i40e_update_vsi_stats()
894 ns = i40e_get_vsi_stats_struct(vsi); in i40e_update_vsi_stats()
895 ons = &vsi->net_stats_offsets; in i40e_update_vsi_stats()
896 es = &vsi->eth_stats; in i40e_update_vsi_stats()
897 oes = &vsi->eth_stats_offsets; in i40e_update_vsi_stats()
913 for (q = 0; q < vsi->num_queue_pairs; q++) { in i40e_update_vsi_stats()
915 p = READ_ONCE(vsi->tx_rings[q]); in i40e_update_vsi_stats()
933 p = READ_ONCE(vsi->rx_rings[q]); in i40e_update_vsi_stats()
951 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_update_vsi_stats()
953 p = READ_ONCE(vsi->xdp_rings[q]); in i40e_update_vsi_stats()
971 vsi->tx_restart = tx_restart; in i40e_update_vsi_stats()
972 vsi->tx_busy = tx_busy; in i40e_update_vsi_stats()
973 vsi->tx_linearize = tx_linearize; in i40e_update_vsi_stats()
974 vsi->tx_force_wb = tx_force_wb; in i40e_update_vsi_stats()
975 vsi->tx_stopped = tx_stopped; in i40e_update_vsi_stats()
976 vsi->rx_page_failed = rx_page; in i40e_update_vsi_stats()
977 vsi->rx_buf_failed = rx_buf; in i40e_update_vsi_stats()
978 vsi->rx_page_reuse = rx_reuse; in i40e_update_vsi_stats()
979 vsi->rx_page_alloc = rx_alloc; in i40e_update_vsi_stats()
980 vsi->rx_page_waive = rx_waive; in i40e_update_vsi_stats()
981 vsi->rx_page_busy = rx_busy; in i40e_update_vsi_stats()
989 i40e_update_eth_stats(vsi); in i40e_update_vsi_stats()
1000 if (vsi == pf->vsi[pf->lan_vsi]) { in i40e_update_vsi_stats()
1243 void i40e_update_stats(struct i40e_vsi *vsi) in i40e_update_stats() argument
1245 struct i40e_pf *pf = vsi->back; in i40e_update_stats()
1247 if (vsi == pf->vsi[pf->lan_vsi]) in i40e_update_stats()
1250 i40e_update_vsi_stats(vsi); in i40e_update_stats()
1259 int i40e_count_filters(struct i40e_vsi *vsi) in i40e_count_filters() argument
1266 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_count_filters()
1284 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, in i40e_find_filter() argument
1290 if (!vsi || !macaddr) in i40e_find_filter()
1294 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) { in i40e_find_filter()
1310 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr) in i40e_find_mac() argument
1315 if (!vsi || !macaddr) in i40e_find_mac()
1319 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) { in i40e_find_mac()
1332 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) in i40e_is_vsi_in_vlan() argument
1335 if (vsi->info.pvid) in i40e_is_vsi_in_vlan()
1358 return vsi->has_vlan_filter; in i40e_is_vsi_in_vlan()
1390 static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, in i40e_correct_mac_vlan_filters() argument
1395 s16 pvid = le16_to_cpu(vsi->info.pvid); in i40e_correct_mac_vlan_filters()
1426 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_correct_mac_vlan_filters()
1444 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan); in i40e_correct_mac_vlan_filters()
1468 vsi->has_vlan_filter = !!vlan_filters; in i40e_correct_mac_vlan_filters()
1487 static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi, in i40e_get_vf_new_vlan() argument
1493 s16 pvid = le16_to_cpu(vsi->info.pvid); in i40e_get_vf_new_vlan()
1494 struct i40e_pf *pf = vsi->back; in i40e_get_vf_new_vlan()
1537 static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi, in i40e_correct_vf_mac_vlan_filters() argument
1549 new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL, in i40e_correct_vf_mac_vlan_filters()
1553 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_correct_vf_mac_vlan_filters()
1554 new_vlan = i40e_get_vf_new_vlan(vsi, NULL, f, vlan_filters, in i40e_correct_vf_mac_vlan_filters()
1557 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan); in i40e_correct_vf_mac_vlan_filters()
1579 vsi->has_vlan_filter = !!vlan_filters; in i40e_correct_vf_mac_vlan_filters()
1591 static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) in i40e_rm_default_mac_filter() argument
1594 struct i40e_pf *pf = vsi->back; in i40e_rm_default_mac_filter()
1597 if (vsi->type != I40E_VSI_MAIN) in i40e_rm_default_mac_filter()
1605 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); in i40e_rm_default_mac_filter()
1613 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); in i40e_rm_default_mac_filter()
1627 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, in i40e_add_filter() argument
1633 if (!vsi || !macaddr) in i40e_add_filter()
1636 f = i40e_find_filter(vsi, macaddr, vlan); in i40e_add_filter()
1646 vsi->has_vlan_filter = true; in i40e_add_filter()
1654 hash_add(vsi->mac_filter_hash, &f->hlist, key); in i40e_add_filter()
1656 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_add_filter()
1657 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); in i40e_add_filter()
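
i40e_find_filter() and i40e_add_filter() above implement lookup-or-insert over a hash table of MAC/VLAN filters, with (macaddr, vlan) as the full match key and hash_for_each_possible() walking a single bucket. A simplified userspace model of that pattern; the table size, hash function, and types here are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define HASH_BITS 8
    #define HASH_SIZE (1u << HASH_BITS)

    struct mac_filter {
            struct mac_filter *next;
            uint8_t macaddr[6];
            int16_t vlan; /* -1 == match any VLAN */
    };

    static struct mac_filter *buckets[HASH_SIZE];

    static unsigned int mac_hash(const uint8_t *mac)
    {
            uint32_t key; /* fold part of the address into a bucket index */

            memcpy(&key, mac + 2, sizeof(key));
            return key & (HASH_SIZE - 1);
    }

    static struct mac_filter *find_filter(const uint8_t *mac, int16_t vlan)
    {
            for (struct mac_filter *f = buckets[mac_hash(mac)]; f; f = f->next)
                    if (!memcmp(f->macaddr, mac, 6) && f->vlan == vlan)
                            return f;
            return NULL;
    }

    static struct mac_filter *add_filter(const uint8_t *mac, int16_t vlan)
    {
            struct mac_filter *f = find_filter(mac, vlan);

            if (f)
                    return f; /* already present: the add is idempotent */
            f = calloc(1, sizeof(*f));
            if (!f)
                    return NULL;
            memcpy(f->macaddr, mac, 6);
            f->vlan = vlan;
            f->next = buckets[mac_hash(mac)];
            buckets[mac_hash(mac)] = f;
            return f;
    }
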
1689 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) in __i40e_del_filter() argument
1706 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in __i40e_del_filter()
1707 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); in __i40e_del_filter()
1722 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan) in i40e_del_filter() argument
1726 if (!vsi || !macaddr) in i40e_del_filter()
1729 f = i40e_find_filter(vsi, macaddr, vlan); in i40e_del_filter()
1730 __i40e_del_filter(vsi, f); in i40e_del_filter()
1745 struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, in i40e_add_mac_filter() argument
1752 lockdep_assert_held(&vsi->mac_filter_hash_lock); in i40e_add_mac_filter()
1753 if (vsi->info.pvid) in i40e_add_mac_filter()
1754 return i40e_add_filter(vsi, macaddr, in i40e_add_mac_filter()
1755 le16_to_cpu(vsi->info.pvid)); in i40e_add_mac_filter()
1757 if (!i40e_is_vsi_in_vlan(vsi)) in i40e_add_mac_filter()
1758 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY); in i40e_add_mac_filter()
1760 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_add_mac_filter()
1763 add = i40e_add_filter(vsi, macaddr, f->vlan); in i40e_add_mac_filter()
1781 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr) in i40e_del_mac_filter() argument
1788 lockdep_assert_held(&vsi->mac_filter_hash_lock); in i40e_del_mac_filter()
1789 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_del_mac_filter()
1791 __i40e_del_filter(vsi, f); in i40e_del_mac_filter()
1812 struct i40e_vsi *vsi = np->vsi; in i40e_set_mac() local
1813 struct i40e_pf *pf = vsi->back; in i40e_set_mac()
1836 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_set_mac()
1837 i40e_del_mac_filter(vsi, netdev->dev_addr); in i40e_set_mac()
1839 i40e_add_mac_filter(vsi, netdev->dev_addr); in i40e_set_mac()
1840 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_set_mac()
1842 if (vsi->type == I40E_VSI_MAIN) { in i40e_set_mac()
1867 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, in i40e_config_rss_aq() argument
1870 struct i40e_pf *pf = vsi->back; in i40e_config_rss_aq()
1877 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw); in i40e_config_rss_aq()
1887 bool pf_lut = vsi->type == I40E_VSI_MAIN; in i40e_config_rss_aq()
1889 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); in i40e_config_rss_aq()
1905 static int i40e_vsi_config_rss(struct i40e_vsi *vsi) in i40e_vsi_config_rss() argument
1907 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_rss()
1914 if (!vsi->rss_size) in i40e_vsi_config_rss()
1915 vsi->rss_size = min_t(int, pf->alloc_rss_size, in i40e_vsi_config_rss()
1916 vsi->num_queue_pairs); in i40e_vsi_config_rss()
1917 if (!vsi->rss_size) in i40e_vsi_config_rss()
1919 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in i40e_vsi_config_rss()
1926 if (vsi->rss_lut_user) in i40e_vsi_config_rss()
1927 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in i40e_vsi_config_rss()
1929 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); in i40e_vsi_config_rss()
1930 if (vsi->rss_hkey_user) in i40e_vsi_config_rss()
1931 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); in i40e_vsi_config_rss()
1934 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); in i40e_vsi_config_rss()
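
When no user-supplied LUT exists, i40e_vsi_config_rss() falls back to i40e_fill_rss_lut(), which stripes queue indices across the redirection table. A minimal sketch of that fill:

    #include <stdint.h>

    /* Spread rss_size queues evenly across the redirection table. */
    static void fill_rss_lut(uint8_t *lut, unsigned int lut_size,
                             unsigned int rss_size)
    {
            for (unsigned int i = 0; i < lut_size; i++)
                    lut[i] = (uint8_t)(i % rss_size);
    }
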
1947 static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi, in i40e_vsi_setup_queue_map_mqprio() argument
1955 if (vsi->type != I40E_VSI_MAIN) in i40e_vsi_setup_queue_map_mqprio()
1959 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc; in i40e_vsi_setup_queue_map_mqprio()
1960 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; in i40e_vsi_setup_queue_map_mqprio()
1961 num_qps = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
1971 max_qcount = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
1974 if (vsi->tc_config.enabled_tc & BIT(i)) { in i40e_vsi_setup_queue_map_mqprio()
1975 offset = vsi->mqprio_qopt.qopt.offset[i]; in i40e_vsi_setup_queue_map_mqprio()
1976 qcount = vsi->mqprio_qopt.qopt.count[i]; in i40e_vsi_setup_queue_map_mqprio()
1979 vsi->tc_config.tc_info[i].qoffset = offset; in i40e_vsi_setup_queue_map_mqprio()
1980 vsi->tc_config.tc_info[i].qcount = qcount; in i40e_vsi_setup_queue_map_mqprio()
1981 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; in i40e_vsi_setup_queue_map_mqprio()
1987 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_setup_queue_map_mqprio()
1988 vsi->tc_config.tc_info[i].qcount = 1; in i40e_vsi_setup_queue_map_mqprio()
1989 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_setup_queue_map_mqprio()
1994 vsi->num_queue_pairs = offset + qcount; in i40e_vsi_setup_queue_map_mqprio()
1999 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_vsi_setup_queue_map_mqprio()
2003 vsi->rss_size = max_qcount; in i40e_vsi_setup_queue_map_mqprio()
2004 ret = i40e_vsi_config_rss(vsi); in i40e_vsi_setup_queue_map_mqprio()
2006 dev_info(&vsi->back->pdev->dev, in i40e_vsi_setup_queue_map_mqprio()
2011 vsi->reconfig_rss = true; in i40e_vsi_setup_queue_map_mqprio()
2012 dev_dbg(&vsi->back->pdev->dev, in i40e_vsi_setup_queue_map_mqprio()
2018 override_q = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
2019 if (override_q && override_q < vsi->num_queue_pairs) { in i40e_vsi_setup_queue_map_mqprio()
2020 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q; in i40e_vsi_setup_queue_map_mqprio()
2021 vsi->next_base_queue = override_q; in i40e_vsi_setup_queue_map_mqprio()
2035 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, in i40e_vsi_setup_queue_map() argument
2040 struct i40e_pf *pf = vsi->back; in i40e_vsi_setup_queue_map()
2055 if (vsi->type == I40E_VSI_MAIN) { in i40e_vsi_setup_queue_map()
2065 if (vsi->req_queue_pairs > 0) in i40e_vsi_setup_queue_map()
2066 vsi->num_queue_pairs = vsi->req_queue_pairs; in i40e_vsi_setup_queue_map()
2068 vsi->num_queue_pairs = pf->num_lan_msix; in i40e_vsi_setup_queue_map()
2070 vsi->num_queue_pairs = 1; in i40e_vsi_setup_queue_map()
2074 if (vsi->type == I40E_VSI_MAIN || in i40e_vsi_setup_queue_map()
2075 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0)) in i40e_vsi_setup_queue_map()
2076 num_tc_qps = vsi->num_queue_pairs; in i40e_vsi_setup_queue_map()
2078 num_tc_qps = vsi->alloc_queue_pairs; in i40e_vsi_setup_queue_map()
2080 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_vsi_setup_queue_map()
2095 vsi->tc_config.numtc = numtc; in i40e_vsi_setup_queue_map()
2096 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; in i40e_vsi_setup_queue_map()
2105 if (vsi->tc_config.enabled_tc & BIT(i)) { in i40e_vsi_setup_queue_map()
2109 switch (vsi->type) { in i40e_vsi_setup_queue_map()
2113 vsi->tc_config.enabled_tc != 1) { in i40e_vsi_setup_queue_map()
2127 vsi->tc_config.tc_info[i].qoffset = offset; in i40e_vsi_setup_queue_map()
2128 vsi->tc_config.tc_info[i].qcount = qcount; in i40e_vsi_setup_queue_map()
2138 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; in i40e_vsi_setup_queue_map()
2149 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_setup_queue_map()
2150 vsi->tc_config.tc_info[i].qcount = 1; in i40e_vsi_setup_queue_map()
2151 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_setup_queue_map()
2158 if ((vsi->type == I40E_VSI_MAIN && numtc != 1) || in i40e_vsi_setup_queue_map()
2159 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) || in i40e_vsi_setup_queue_map()
2160 (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV)) in i40e_vsi_setup_queue_map()
2161 vsi->num_queue_pairs = offset; in i40e_vsi_setup_queue_map()
2169 if (vsi->type == I40E_VSI_SRIOV) { in i40e_vsi_setup_queue_map()
2172 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_setup_queue_map()
2174 cpu_to_le16(vsi->base_queue + i); in i40e_vsi_setup_queue_map()
2178 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_vsi_setup_queue_map()
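
i40e_vsi_setup_queue_map() hands each enabled TC a contiguous [qoffset, qoffset + qcount) slice of the VSI's queues, while disabled TCs are pointed at a single safe queue. A simplified sketch of that layout pass, ignoring the per-VSI-type qcount adjustments the real function performs:

    #include <stdint.h>

    struct tc_info { unsigned int qoffset, qcount; };

    /* Assign contiguous queue slices to the enabled traffic classes. */
    static unsigned int map_tcs(uint8_t enabled_tc, unsigned int qps_per_tc,
                                struct tc_info info[8])
    {
            unsigned int offset = 0;

            for (int i = 0; i < 8; i++) {
                    if (enabled_tc & (1u << i)) {
                            info[i].qoffset = offset;
                            info[i].qcount  = qps_per_tc;
                            offset += qps_per_tc;
                    } else {
                            /* disabled TCs still point at one safe queue */
                            info[i].qoffset = 0;
                            info[i].qcount  = 1;
                    }
            }
            return offset; /* == total queue pairs actually used */
    }
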
2194 struct i40e_vsi *vsi = np->vsi; in i40e_addr_sync() local
2196 if (i40e_add_mac_filter(vsi, addr)) in i40e_addr_sync()
2213 struct i40e_vsi *vsi = np->vsi; in i40e_addr_unsync() local
2223 i40e_del_mac_filter(vsi, addr); in i40e_addr_unsync()
2235 struct i40e_vsi *vsi = np->vsi; in i40e_set_rx_mode() local
2237 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_set_rx_mode()
2242 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_set_rx_mode()
2245 if (vsi->current_netdev_flags != vsi->netdev->flags) { in i40e_set_rx_mode()
2246 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_set_rx_mode()
2247 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); in i40e_set_rx_mode()
2259 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi, in i40e_undo_del_filter_entries() argument
2270 hash_add(vsi->mac_filter_hash, &f->hlist, key); in i40e_undo_del_filter_entries()
2282 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi, in i40e_undo_add_filter_entries() argument
2291 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1); in i40e_undo_add_filter_entries()
2369 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name, in i40e_aqc_del_filters() argument
2373 struct i40e_hw *hw = &vsi->back->hw; in i40e_aqc_del_filters()
2377 aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL, in i40e_aqc_del_filters()
2383 dev_info(&vsi->back->pdev->dev, in i40e_aqc_del_filters()
2403 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, in i40e_aqc_add_filters() argument
2408 struct i40e_hw *hw = &vsi->back->hw; in i40e_aqc_add_filters()
2412 i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status); in i40e_aqc_add_filters()
2416 if (vsi->type == I40E_VSI_MAIN) { in i40e_aqc_add_filters()
2417 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_aqc_add_filters()
2418 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_add_filters()
2421 } else if (vsi->type == I40E_VSI_SRIOV || in i40e_aqc_add_filters()
2422 vsi->type == I40E_VSI_VMDQ1 || in i40e_aqc_add_filters()
2423 vsi->type == I40E_VSI_VMDQ2) { in i40e_aqc_add_filters()
2424 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_add_filters()
2429 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_add_filters()
2432 vsi->type); in i40e_aqc_add_filters()
2450 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, in i40e_aqc_broadcast_filter() argument
2455 struct i40e_hw *hw = &vsi->back->hw; in i40e_aqc_broadcast_filter()
2460 vsi->seid, in i40e_aqc_broadcast_filter()
2465 vsi->seid, in i40e_aqc_broadcast_filter()
2472 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_aqc_broadcast_filter()
2473 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_broadcast_filter()
2493 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_set_promiscuous() local
2497 if (vsi->type == I40E_VSI_MAIN && in i40e_set_promiscuous()
2507 vsi->seid, in i40e_set_promiscuous()
2511 vsi->seid, in i40e_set_promiscuous()
2522 vsi->seid, in i40e_set_promiscuous()
2533 vsi->seid, in i40e_set_promiscuous()
2557 int i40e_sync_vsi_filters(struct i40e_vsi *vsi) in i40e_sync_vsi_filters() argument
2562 struct i40e_hw *hw = &vsi->back->hw; in i40e_sync_vsi_filters()
2583 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state)) in i40e_sync_vsi_filters()
2585 pf = vsi->back; in i40e_sync_vsi_filters()
2587 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2589 if (vsi->netdev) { in i40e_sync_vsi_filters()
2590 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; in i40e_sync_vsi_filters()
2591 vsi->current_netdev_flags = vsi->netdev->flags; in i40e_sync_vsi_filters()
2597 if (vsi->type == I40E_VSI_SRIOV) in i40e_sync_vsi_filters()
2598 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id); in i40e_sync_vsi_filters()
2599 else if (vsi->type != I40E_VSI_MAIN) in i40e_sync_vsi_filters()
2600 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid); in i40e_sync_vsi_filters()
2602 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { in i40e_sync_vsi_filters()
2603 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; in i40e_sync_vsi_filters()
2605 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2607 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_sync_vsi_filters()
2639 if (vsi->type != I40E_VSI_SRIOV) in i40e_sync_vsi_filters()
2641 (vsi, &tmp_add_list, &tmp_del_list, in i40e_sync_vsi_filters()
2645 (vsi, &tmp_add_list, &tmp_del_list, in i40e_sync_vsi_filters()
2646 vlan_filters, pf->vf[vsi->vf_id].trusted); in i40e_sync_vsi_filters()
2649 netdev_hw_addr_refcnt(new->f, vsi->netdev, 1); in i40e_sync_vsi_filters()
2654 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2674 i40e_aqc_broadcast_filter(vsi, vsi_name, f); in i40e_sync_vsi_filters()
2697 i40e_aqc_del_filters(vsi, vsi_name, del_list, in i40e_sync_vsi_filters()
2710 i40e_aqc_del_filters(vsi, vsi_name, del_list, in i40e_sync_vsi_filters()
2734 if (i40e_aqc_broadcast_filter(vsi, vsi_name, in i40e_sync_vsi_filters()
2764 i40e_aqc_add_filters(vsi, vsi_name, add_list, in i40e_sync_vsi_filters()
2771 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head, in i40e_sync_vsi_filters()
2777 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2784 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1); in i40e_sync_vsi_filters()
2787 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2793 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2794 vsi->active_filters = 0; in i40e_sync_vsi_filters()
2795 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { in i40e_sync_vsi_filters()
2797 vsi->active_filters++; in i40e_sync_vsi_filters()
2801 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2808 vsi->active_filters < vsi->promisc_threshold) { in i40e_sync_vsi_filters()
2812 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2813 vsi->promisc_threshold = 0; in i40e_sync_vsi_filters()
2817 if (vsi->type == I40E_VSI_SRIOV && pf->vf && in i40e_sync_vsi_filters()
2818 !pf->vf[vsi->vf_id].trusted) { in i40e_sync_vsi_filters()
2819 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2823 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2829 vsi->promisc_threshold = (vsi->active_filters * 3) / 4; in i40e_sync_vsi_filters()
2835 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); in i40e_sync_vsi_filters()
2836 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, in i40e_sync_vsi_filters()
2837 vsi->seid, in i40e_sync_vsi_filters()
2857 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || in i40e_sync_vsi_filters()
2874 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_sync_vsi_filters()
2876 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); in i40e_sync_vsi_filters()
2881 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2883 i40e_undo_del_filter_entries(vsi, &tmp_del_list); in i40e_sync_vsi_filters()
2884 i40e_undo_add_filter_entries(vsi, &tmp_add_list); in i40e_sync_vsi_filters()
2885 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2887 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_sync_vsi_filters()
2888 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); in i40e_sync_vsi_filters()
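
The promiscuous hysteresis visible in i40e_sync_vsi_filters() is simple arithmetic: when filter adds overflow the hardware table and force promiscuous mode, the threshold is set to (active_filters * 3) / 4, and the earlier active_filters < promisc_threshold check only restores normal filtering once deletions bring the count back under it. For example, overflowing at 100 active filters sets the threshold to 75, so at least 26 filters must be removed before promiscuous mode is exited.
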
2910 if (pf->vsi[v] && in i40e_sync_filters_subtask()
2911 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) && in i40e_sync_filters_subtask()
2912 !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) { in i40e_sync_filters_subtask()
2913 int ret = i40e_sync_vsi_filters(pf->vsi[v]); in i40e_sync_filters_subtask()
2930 static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi) in i40e_calculate_vsi_rx_buf_len() argument
2932 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) in i40e_calculate_vsi_rx_buf_len()
2943 static int i40e_max_vsi_frame_size(struct i40e_vsi *vsi, in i40e_max_vsi_frame_size() argument
2946 u16 rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi); in i40e_max_vsi_frame_size()
2967 struct i40e_vsi *vsi = np->vsi; in i40e_change_mtu() local
2968 struct i40e_pf *pf = vsi->back; in i40e_change_mtu()
2971 frame_size = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog); in i40e_change_mtu()
2982 i40e_vsi_reinit_locked(vsi); in i40e_change_mtu()
2997 struct i40e_pf *pf = np->vsi->back; in i40e_ioctl()
3013 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) in i40e_vlan_stripping_enable() argument
3019 if (vsi->info.pvid) in i40e_vlan_stripping_enable()
3022 if ((vsi->info.valid_sections & in i40e_vlan_stripping_enable()
3024 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) in i40e_vlan_stripping_enable()
3027 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); in i40e_vlan_stripping_enable()
3028 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | in i40e_vlan_stripping_enable()
3031 ctxt.seid = vsi->seid; in i40e_vlan_stripping_enable()
3032 ctxt.info = vsi->info; in i40e_vlan_stripping_enable()
3033 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_vlan_stripping_enable()
3035 dev_info(&vsi->back->pdev->dev, in i40e_vlan_stripping_enable()
3038 i40e_aq_str(&vsi->back->hw, in i40e_vlan_stripping_enable()
3039 vsi->back->hw.aq.asq_last_status)); in i40e_vlan_stripping_enable()
3047 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) in i40e_vlan_stripping_disable() argument
3053 if (vsi->info.pvid) in i40e_vlan_stripping_disable()
3056 if ((vsi->info.valid_sections & in i40e_vlan_stripping_disable()
3058 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == in i40e_vlan_stripping_disable()
3062 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); in i40e_vlan_stripping_disable()
3063 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | in i40e_vlan_stripping_disable()
3066 ctxt.seid = vsi->seid; in i40e_vlan_stripping_disable()
3067 ctxt.info = vsi->info; in i40e_vlan_stripping_disable()
3068 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_vlan_stripping_disable()
3070 dev_info(&vsi->back->pdev->dev, in i40e_vlan_stripping_disable()
3073 i40e_aq_str(&vsi->back->hw, in i40e_vlan_stripping_disable()
3074 vsi->back->hw.aq.asq_last_status)); in i40e_vlan_stripping_disable()
3091 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) in i40e_add_vlan_all_mac() argument
3097 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_add_vlan_all_mac()
3113 add_f = i40e_add_filter(vsi, f->macaddr, vid); in i40e_add_vlan_all_mac()
3115 dev_info(&vsi->back->pdev->dev, in i40e_add_vlan_all_mac()
3130 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid) in i40e_vsi_add_vlan() argument
3134 if (vsi->info.pvid) in i40e_vsi_add_vlan()
3149 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_add_vlan()
3150 err = i40e_add_vlan_all_mac(vsi, vid); in i40e_vsi_add_vlan()
3151 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_add_vlan()
3158 i40e_service_event_schedule(vsi->back); in i40e_vsi_add_vlan()
3175 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) in i40e_rm_vlan_all_mac() argument
3181 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_rm_vlan_all_mac()
3183 __i40e_del_filter(vsi, f); in i40e_rm_vlan_all_mac()
3192 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid) in i40e_vsi_kill_vlan() argument
3194 if (!vid || vsi->info.pvid) in i40e_vsi_kill_vlan()
3197 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_kill_vlan()
3198 i40e_rm_vlan_all_mac(vsi, vid); in i40e_vsi_kill_vlan()
3199 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_kill_vlan()
3204 i40e_service_event_schedule(vsi->back); in i40e_vsi_kill_vlan()
3219 struct i40e_vsi *vsi = np->vsi; in i40e_vlan_rx_add_vid() local
3225 ret = i40e_vsi_add_vlan(vsi, vid); in i40e_vlan_rx_add_vid()
3227 set_bit(vid, vsi->active_vlans); in i40e_vlan_rx_add_vid()
3242 struct i40e_vsi *vsi = np->vsi; in i40e_vlan_rx_add_vid_up() local
3246 set_bit(vid, vsi->active_vlans); in i40e_vlan_rx_add_vid_up()
3261 struct i40e_vsi *vsi = np->vsi; in i40e_vlan_rx_kill_vid() local
3267 i40e_vsi_kill_vlan(vsi, vid); in i40e_vlan_rx_kill_vid()
3269 clear_bit(vid, vsi->active_vlans); in i40e_vlan_rx_kill_vid()
3278 static void i40e_restore_vlan(struct i40e_vsi *vsi) in i40e_restore_vlan() argument
3282 if (!vsi->netdev) in i40e_restore_vlan()
3285 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in i40e_restore_vlan()
3286 i40e_vlan_stripping_enable(vsi); in i40e_restore_vlan()
3288 i40e_vlan_stripping_disable(vsi); in i40e_restore_vlan()
3290 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) in i40e_restore_vlan()
3291 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q), in i40e_restore_vlan()
3300 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) in i40e_vsi_add_pvid() argument
3305 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); in i40e_vsi_add_pvid()
3306 vsi->info.pvid = cpu_to_le16(vid); in i40e_vsi_add_pvid()
3307 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | in i40e_vsi_add_pvid()
3311 ctxt.seid = vsi->seid; in i40e_vsi_add_pvid()
3312 ctxt.info = vsi->info; in i40e_vsi_add_pvid()
3313 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_vsi_add_pvid()
3315 dev_info(&vsi->back->pdev->dev, in i40e_vsi_add_pvid()
3318 i40e_aq_str(&vsi->back->hw, in i40e_vsi_add_pvid()
3319 vsi->back->hw.aq.asq_last_status)); in i40e_vsi_add_pvid()
3332 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) in i40e_vsi_remove_pvid() argument
3334 vsi->info.pvid = 0; in i40e_vsi_remove_pvid()
3336 i40e_vlan_stripping_disable(vsi); in i40e_vsi_remove_pvid()
3349 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) in i40e_vsi_setup_tx_resources() argument
3353 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_tx_resources()
3354 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); in i40e_vsi_setup_tx_resources()
3356 if (!i40e_enabled_xdp_vsi(vsi)) in i40e_vsi_setup_tx_resources()
3359 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_tx_resources()
3360 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]); in i40e_vsi_setup_tx_resources()
3371 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) in i40e_vsi_free_tx_resources() argument
3375 if (vsi->tx_rings) { in i40e_vsi_free_tx_resources()
3376 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_tx_resources()
3377 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in i40e_vsi_free_tx_resources()
3378 i40e_free_tx_resources(vsi->tx_rings[i]); in i40e_vsi_free_tx_resources()
3381 if (vsi->xdp_rings) { in i40e_vsi_free_tx_resources()
3382 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_tx_resources()
3383 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) in i40e_vsi_free_tx_resources()
3384 i40e_free_tx_resources(vsi->xdp_rings[i]); in i40e_vsi_free_tx_resources()
3398 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) in i40e_vsi_setup_rx_resources() argument
3402 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_rx_resources()
3403 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); in i40e_vsi_setup_rx_resources()
3413 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) in i40e_vsi_free_rx_resources() argument
3417 if (!vsi->rx_rings) in i40e_vsi_free_rx_resources()
3420 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_rx_resources()
3421 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) in i40e_vsi_free_rx_resources()
3422 i40e_free_rx_resources(vsi->rx_rings[i]); in i40e_vsi_free_rx_resources()
3456 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); in i40e_xsk_pool()
3460 qid -= ring->vsi->alloc_queue_pairs; in i40e_xsk_pool()
3462 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps)) in i40e_xsk_pool()
3465 return xsk_get_pool_from_qid(ring->vsi->netdev, qid); in i40e_xsk_pool()
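
In i40e_xsk_pool() above, XDP Tx rings occupy the queue range after the regular rings, so an XDP ring's queue_index is folded back by alloc_queue_pairs before the per-channel AF_XDP zero-copy state is consulted. A sketch of just that index adjustment (names illustrative):

    #include <stdbool.h>

    /*
     * Map a ring's queue_index back into [0, alloc_queue_pairs) so that
     * an XDP Tx ring and its paired Rx ring share one channel id.
     */
    static unsigned int xsk_channel_id(unsigned int queue_index,
                                       bool is_xdp_ring,
                                       unsigned int alloc_queue_pairs)
    {
            if (is_xdp_ring)
                    queue_index -= alloc_queue_pairs;
            return queue_index;
    }
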
3476 struct i40e_vsi *vsi = ring->vsi; in i40e_configure_tx_ring() local
3477 u16 pf_q = vsi->base_queue + ring->queue_index; in i40e_configure_tx_ring()
3478 struct i40e_hw *hw = &vsi->back->hw; in i40e_configure_tx_ring()
3487 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { in i40e_configure_tx_ring()
3488 ring->atr_sample_rate = vsi->back->atr_sample_rate; in i40e_configure_tx_ring()
3503 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | in i40e_configure_tx_ring()
3505 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); in i40e_configure_tx_ring()
3507 if (vsi->type != I40E_VSI_FDIR) in i40e_configure_tx_ring()
3528 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); in i40e_configure_tx_ring()
3535 dev_info(&vsi->back->pdev->dev, in i40e_configure_tx_ring()
3544 dev_info(&vsi->back->pdev->dev, in i40e_configure_tx_ring()
3561 if (vsi->type == I40E_VSI_VMDQ2) { in i40e_configure_tx_ring()
3563 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & in i40e_configure_tx_ring()
3600 struct i40e_vsi *vsi = ring->vsi; in i40e_configure_rx_ring() local
3601 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; in i40e_configure_rx_ring()
3602 u16 pf_q = vsi->base_queue + ring->queue_index; in i40e_configure_rx_ring()
3603 struct i40e_hw *hw = &vsi->back->hw; in i40e_configure_rx_ring()
3613 ring->rx_buf_len = vsi->rx_buf_len; in i40e_configure_rx_ring()
3616 if (ring->vsi->type != I40E_VSI_MAIN) in i40e_configure_rx_ring()
3643 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3672 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len); in i40e_configure_rx_ring()
3687 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3696 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3703 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) { in i40e_configure_rx_ring()
3705 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3730 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3745 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) in i40e_vsi_configure_tx() argument
3750 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) in i40e_vsi_configure_tx()
3751 err = i40e_configure_tx_ring(vsi->tx_rings[i]); in i40e_vsi_configure_tx()
3753 if (err || !i40e_enabled_xdp_vsi(vsi)) in i40e_vsi_configure_tx()
3756 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) in i40e_vsi_configure_tx()
3757 err = i40e_configure_tx_ring(vsi->xdp_rings[i]); in i40e_vsi_configure_tx()
3768 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) in i40e_vsi_configure_rx() argument
3773 vsi->max_frame = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog); in i40e_vsi_configure_rx()
3774 vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi); in i40e_vsi_configure_rx()
3777 if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING && in i40e_vsi_configure_rx()
3778 vsi->netdev->mtu <= ETH_DATA_LEN) { in i40e_vsi_configure_rx()
3779 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; in i40e_vsi_configure_rx()
3780 vsi->max_frame = vsi->rx_buf_len; in i40e_vsi_configure_rx()
3785 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_configure_rx()
3786 err = i40e_configure_rx_ring(vsi->rx_rings[i]); in i40e_vsi_configure_rx()
3795 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) in i40e_vsi_config_dcb_rings() argument
3801 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_vsi_config_dcb_rings()
3803 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_config_dcb_rings()
3804 rx_ring = vsi->rx_rings[i]; in i40e_vsi_config_dcb_rings()
3805 tx_ring = vsi->tx_rings[i]; in i40e_vsi_config_dcb_rings()
3813 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) in i40e_vsi_config_dcb_rings()
3816 qoffset = vsi->tc_config.tc_info[n].qoffset; in i40e_vsi_config_dcb_rings()
3817 qcount = vsi->tc_config.tc_info[n].qcount; in i40e_vsi_config_dcb_rings()
3819 rx_ring = vsi->rx_rings[i]; in i40e_vsi_config_dcb_rings()
3820 tx_ring = vsi->tx_rings[i]; in i40e_vsi_config_dcb_rings()
3831 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) in i40e_set_vsi_rx_mode() argument
3833 if (vsi->netdev) in i40e_set_vsi_rx_mode()
3834 i40e_set_rx_mode(vsi->netdev); in i40e_set_vsi_rx_mode()
3862 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) in i40e_fdir_filter_restore() argument
3865 struct i40e_pf *pf = vsi->back; in i40e_fdir_filter_restore()
3876 i40e_add_del_fdir(vsi, filter, true); in i40e_fdir_filter_restore()
3884 static int i40e_vsi_configure(struct i40e_vsi *vsi) in i40e_vsi_configure() argument
3888 i40e_set_vsi_rx_mode(vsi); in i40e_vsi_configure()
3889 i40e_restore_vlan(vsi); in i40e_vsi_configure()
3890 i40e_vsi_config_dcb_rings(vsi); in i40e_vsi_configure()
3891 err = i40e_vsi_configure_tx(vsi); in i40e_vsi_configure()
3893 err = i40e_vsi_configure_rx(vsi); in i40e_vsi_configure()
3902 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) in i40e_vsi_configure_msix() argument
3904 bool has_xdp = i40e_enabled_xdp_vsi(vsi); in i40e_vsi_configure_msix()
3905 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_msix()
3915 qp = vsi->base_queue; in i40e_vsi_configure_msix()
3916 vector = vsi->base_vector; in i40e_vsi_configure_msix()
3917 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { in i40e_vsi_configure_msix()
3918 struct i40e_q_vector *q_vector = vsi->q_vectors[i]; in i40e_vsi_configure_msix()
3922 ITR_TO_REG(vsi->rx_rings[i]->itr_setting); in i40e_vsi_configure_msix()
3929 ITR_TO_REG(vsi->tx_rings[i]->itr_setting); in i40e_vsi_configure_msix()
3941 i40e_intrl_usec_to_reg(vsi->int_rate_limit)); in i40e_vsi_configure_msix()
3946 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp; in i40e_vsi_configure_msix()
4032 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) in i40e_configure_msi_and_legacy() argument
4034 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0; in i40e_configure_msi_and_legacy()
4035 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; in i40e_configure_msi_and_legacy()
4036 struct i40e_pf *pf = vsi->back; in i40e_configure_msi_and_legacy()
4041 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting); in i40e_configure_msi_and_legacy()
4045 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting); in i40e_configure_msi_and_legacy()
4059 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_configure_msi_and_legacy()
4152 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) in i40e_vsi_request_irq_msix() argument
4154 int q_vectors = vsi->num_q_vectors; in i40e_vsi_request_irq_msix()
4155 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq_msix()
4156 int base = vsi->base_vector; in i40e_vsi_request_irq_msix()
4164 struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; in i40e_vsi_request_irq_msix()
4183 vsi->irq_handler, in i40e_vsi_request_irq_msix()
4208 vsi->irqs_ready = true; in i40e_vsi_request_irq_msix()
4217 free_irq(irq_num, &vsi->q_vectors[vector]); in i40e_vsi_request_irq_msix()
4226 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) in i40e_vsi_disable_irq() argument
4228 struct i40e_pf *pf = vsi->back; in i40e_vsi_disable_irq()
4230 int base = vsi->base_vector; in i40e_vsi_disable_irq()
4234 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_disable_irq()
4237 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); in i40e_vsi_disable_irq()
4239 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); in i40e_vsi_disable_irq()
4241 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx)); in i40e_vsi_disable_irq()
4243 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val); in i40e_vsi_disable_irq()
4245 if (!i40e_enabled_xdp_vsi(vsi)) in i40e_vsi_disable_irq()
4247 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0); in i40e_vsi_disable_irq()
4252 for (i = vsi->base_vector; in i40e_vsi_disable_irq()
4253 i < (vsi->num_q_vectors + vsi->base_vector); i++) in i40e_vsi_disable_irq()
4257 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_vsi_disable_irq()
4272 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) in i40e_vsi_enable_irq() argument
4274 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_irq()
4278 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_vsi_enable_irq()
4279 i40e_irq_dynamic_enable(vsi, i); in i40e_vsi_enable_irq()
4342 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_intr() local
4343 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; in i40e_intr()
4456 struct i40e_vsi *vsi = tx_ring->vsi; in i40e_clean_fdir_tx_irq() local
4526 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) in i40e_clean_fdir_tx_irq()
4527 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); in i40e_clean_fdir_tx_irq()
4540 struct i40e_vsi *vsi; in i40e_fdir_clean_ring() local
4545 vsi = q_vector->tx.ring->vsi; in i40e_fdir_clean_ring()
4546 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); in i40e_fdir_clean_ring()
4557 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) in i40e_map_vector_to_qp() argument
4559 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; in i40e_map_vector_to_qp()
4560 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; in i40e_map_vector_to_qp()
4561 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; in i40e_map_vector_to_qp()
4569 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_map_vector_to_qp()
4570 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx]; in i40e_map_vector_to_qp()
4593 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) in i40e_vsi_map_rings_to_vectors() argument
4595 int qp_remaining = vsi->num_queue_pairs; in i40e_vsi_map_rings_to_vectors()
4596 int q_vectors = vsi->num_q_vectors; in i40e_vsi_map_rings_to_vectors()
4609 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; in i40e_vsi_map_rings_to_vectors()
4614 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1; in i40e_vsi_map_rings_to_vectors()
4622 i40e_map_vector_to_qp(vsi, v_start, qp_idx); in i40e_vsi_map_rings_to_vectors()
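
i40e_vsi_map_rings_to_vectors() balances queue pairs over interrupt vectors by giving each vector the ceiling of remaining pairs over remaining vectors, so 10 pairs across 4 vectors come out as 3, 3, 2, 2. A sketch of that distribution, assuming simplified arguments:

    /* ceil(a / b) for positive integers */
    #define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b))

    /* Spread 'qps' queue pairs across 'vectors' interrupt vectors. */
    static void map_rings_to_vectors(unsigned int qps, unsigned int vectors)
    {
            unsigned int qp_idx = 0;

            for (unsigned int v = 0; v < vectors && qps; v++) {
                    unsigned int take = DIV_ROUND_UP(qps, vectors - v);

                    /* vector v services queue pairs [qp_idx, qp_idx + take) */
                    qp_idx += take;
                    qps -= take;
            }
    }
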
4634 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) in i40e_vsi_request_irq() argument
4636 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq()
4640 err = i40e_vsi_request_irq_msix(vsi, basename); in i40e_vsi_request_irq()
4665 struct i40e_vsi *vsi = np->vsi; in i40e_netpoll() local
4666 struct i40e_pf *pf = vsi->back; in i40e_netpoll()
4670 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_netpoll()
4674 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_netpoll()
4675 i40e_msix_clean_rings(0, vsi->q_vectors[i]); in i40e_netpoll()
4788 static int i40e_vsi_enable_tx(struct i40e_vsi *vsi) in i40e_vsi_enable_tx() argument
4790 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_tx()
4793 pf_q = vsi->base_queue; in i40e_vsi_enable_tx()
4794 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_enable_tx()
4795 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_enable_tx()
4801 if (!i40e_enabled_xdp_vsi(vsi)) in i40e_vsi_enable_tx()
4804 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_enable_tx()
4805 pf_q + vsi->alloc_queue_pairs, in i40e_vsi_enable_tx()
4907 static int i40e_vsi_enable_rx(struct i40e_vsi *vsi) in i40e_vsi_enable_rx() argument
4909 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_rx()
4912 pf_q = vsi->base_queue; in i40e_vsi_enable_rx()
4913 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_enable_rx()
4918 vsi->seid, pf_q); in i40e_vsi_enable_rx()
4930 int i40e_vsi_start_rings(struct i40e_vsi *vsi) in i40e_vsi_start_rings() argument
4935 ret = i40e_vsi_enable_rx(vsi); in i40e_vsi_start_rings()
4938 ret = i40e_vsi_enable_tx(vsi); in i40e_vsi_start_rings()
4949 void i40e_vsi_stop_rings(struct i40e_vsi *vsi) in i40e_vsi_stop_rings() argument
4951 struct i40e_pf *pf = vsi->back; in i40e_vsi_stop_rings()
4955 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) in i40e_vsi_stop_rings()
4956 return i40e_vsi_stop_rings_no_wait(vsi); in i40e_vsi_stop_rings()
4958 q_end = vsi->base_queue + vsi->num_queue_pairs; in i40e_vsi_stop_rings()
4959 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) in i40e_vsi_stop_rings()
4962 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) { in i40e_vsi_stop_rings()
4967 vsi->seid, pf_q); in i40e_vsi_stop_rings()
4971 pf_q = vsi->base_queue; in i40e_vsi_stop_rings()
4972 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) in i40e_vsi_stop_rings()
4975 i40e_vsi_wait_queues_disabled(vsi); in i40e_vsi_stop_rings()
4989 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi) in i40e_vsi_stop_rings_no_wait() argument
4991 struct i40e_pf *pf = vsi->back; in i40e_vsi_stop_rings_no_wait()
4994 pf_q = vsi->base_queue; in i40e_vsi_stop_rings_no_wait()
4995 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_stop_rings_no_wait()
5005 static void i40e_vsi_free_irq(struct i40e_vsi *vsi) in i40e_vsi_free_irq() argument
5007 struct i40e_pf *pf = vsi->back; in i40e_vsi_free_irq()
5009 int base = vsi->base_vector; in i40e_vsi_free_irq()
5014 if (!vsi->q_vectors) in i40e_vsi_free_irq()
5017 if (!vsi->irqs_ready) in i40e_vsi_free_irq()
5020 vsi->irqs_ready = false; in i40e_vsi_free_irq()
5021 for (i = 0; i < vsi->num_q_vectors; i++) { in i40e_vsi_free_irq()
5029 if (!vsi->q_vectors[i] || in i40e_vsi_free_irq()
5030 !vsi->q_vectors[i]->num_ringpairs) in i40e_vsi_free_irq()
5037 free_irq(irq_num, vsi->q_vectors[i]); in i40e_vsi_free_irq()
5129 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) in i40e_free_q_vector() argument
5131 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; in i40e_free_q_vector()
5145 if (vsi->netdev) in i40e_free_q_vector()
5148 vsi->q_vectors[v_idx] = NULL; in i40e_free_q_vector()
5160 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) in i40e_vsi_free_q_vectors() argument
5164 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) in i40e_vsi_free_q_vectors()
5165 i40e_free_q_vector(vsi, v_idx); in i40e_vsi_free_q_vectors()
5206 if (pf->vsi[i]) in i40e_clear_interrupt_scheme()
5207 i40e_vsi_free_q_vectors(pf->vsi[i]); in i40e_clear_interrupt_scheme()
5215 static void i40e_napi_enable_all(struct i40e_vsi *vsi) in i40e_napi_enable_all() argument
5219 if (!vsi->netdev) in i40e_napi_enable_all()
5222 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { in i40e_napi_enable_all()
5223 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; in i40e_napi_enable_all()
5234 static void i40e_napi_disable_all(struct i40e_vsi *vsi) in i40e_napi_disable_all() argument
5238 if (!vsi->netdev) in i40e_napi_disable_all()
5241 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { in i40e_napi_disable_all()
5242 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; in i40e_napi_disable_all()
5253 static void i40e_vsi_close(struct i40e_vsi *vsi) in i40e_vsi_close() argument
5255 struct i40e_pf *pf = vsi->back; in i40e_vsi_close()
5256 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_vsi_close()
5257 i40e_down(vsi); in i40e_vsi_close()
5258 i40e_vsi_free_irq(vsi); in i40e_vsi_close()
5259 i40e_vsi_free_tx_resources(vsi); in i40e_vsi_close()
5260 i40e_vsi_free_rx_resources(vsi); in i40e_vsi_close()
5261 vsi->current_netdev_flags = 0; in i40e_vsi_close()
5271 static void i40e_quiesce_vsi(struct i40e_vsi *vsi) in i40e_quiesce_vsi() argument
5273 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_quiesce_vsi()
5276 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state); in i40e_quiesce_vsi()
5277 if (vsi->netdev && netif_running(vsi->netdev)) in i40e_quiesce_vsi()
5278 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); in i40e_quiesce_vsi()
5280 i40e_vsi_close(vsi); in i40e_quiesce_vsi()
5287 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) in i40e_unquiesce_vsi() argument
5289 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state)) in i40e_unquiesce_vsi()
5292 if (vsi->netdev && netif_running(vsi->netdev)) in i40e_unquiesce_vsi()
5293 vsi->netdev->netdev_ops->ndo_open(vsi->netdev); in i40e_unquiesce_vsi()
5295 i40e_vsi_open(vsi); /* this clears the DOWN bit */ in i40e_unquiesce_vsi()
5307 if (pf->vsi[v]) in i40e_pf_quiesce_all_vsi()
5308 i40e_quiesce_vsi(pf->vsi[v]); in i40e_pf_quiesce_all_vsi()
5321 if (pf->vsi[v]) in i40e_pf_unquiesce_all_vsi()
5322 i40e_unquiesce_vsi(pf->vsi[v]); in i40e_pf_unquiesce_all_vsi()
5332 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi) in i40e_vsi_wait_queues_disabled() argument
5334 struct i40e_pf *pf = vsi->back; in i40e_vsi_wait_queues_disabled()
5337 pf_q = vsi->base_queue; in i40e_vsi_wait_queues_disabled()
5338 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_wait_queues_disabled()
5344 vsi->seid, pf_q); in i40e_vsi_wait_queues_disabled()
5348 if (!i40e_enabled_xdp_vsi(vsi)) in i40e_vsi_wait_queues_disabled()
5352 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs, in i40e_vsi_wait_queues_disabled()
5357 vsi->seid, pf_q); in i40e_vsi_wait_queues_disabled()
5366 vsi->seid, pf_q); in i40e_vsi_wait_queues_disabled()
5387 if (pf->vsi[v]) { in i40e_pf_wait_queues_disabled()
5388 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); in i40e_pf_wait_queues_disabled()
5498 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_mqprio_get_enabled_tc() local
5499 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc; in i40e_mqprio_get_enabled_tc()
5521 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; in i40e_pf_get_num_tc()
5578 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) in i40e_vsi_get_bw_info() argument
5582 struct i40e_pf *pf = vsi->back; in i40e_vsi_get_bw_info()
5589 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); in i40e_vsi_get_bw_info()
5599 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, in i40e_vsi_get_bw_info()
5617 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); in i40e_vsi_get_bw_info()
5618 vsi->bw_max_quanta = bw_config.max_bw; in i40e_vsi_get_bw_info()
5622 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; in i40e_vsi_get_bw_info()
5623 vsi->bw_ets_limit_credits[i] = in i40e_vsi_get_bw_info()
5626 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); in i40e_vsi_get_bw_info()
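
The last i40e_vsi_get_bw_info() line above unpacks per-TC max quanta from a single word, one 4-bit field per TC with only the low 3 bits used. The same extraction as a standalone helper:

    #include <stdint.h>

    /* Each TC's max BW quanta sits in its own nibble of the reply word. */
    static uint8_t tc_max_quanta(uint32_t tc_bw_max, unsigned int tc)
    {
            return (uint8_t)((tc_bw_max >> (tc * 4)) & 0x7);
    }
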
5640 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, in i40e_vsi_configure_bw_alloc() argument
5644 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_bw_alloc()
5651 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_vsi_configure_bw_alloc()
5652 ret = i40e_set_bw_limit(vsi, vsi->seid, 0); in i40e_vsi_configure_bw_alloc()
5656 vsi->seid); in i40e_vsi_configure_bw_alloc()
5664 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); in i40e_vsi_configure_bw_alloc()
5673 vsi->info.qs_handle[i] = bw_data.qs_handles[i]; in i40e_vsi_configure_bw_alloc()
5684 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) in i40e_vsi_config_netdev_tc() argument
5686 struct net_device *netdev = vsi->netdev; in i40e_vsi_config_netdev_tc()
5687 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_netdev_tc()
5702 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc)) in i40e_vsi_config_netdev_tc()
5714 if (vsi->tc_config.enabled_tc & BIT(i)) in i40e_vsi_config_netdev_tc()
5716 vsi->tc_config.tc_info[i].netdev_tc, in i40e_vsi_config_netdev_tc()
5717 vsi->tc_config.tc_info[i].qcount, in i40e_vsi_config_netdev_tc()
5718 vsi->tc_config.tc_info[i].qoffset); in i40e_vsi_config_netdev_tc()
5729 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc; in i40e_vsi_config_netdev_tc()
5739 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, in i40e_vsi_update_queue_map() argument
5746 vsi->info.mapping_flags = ctxt->info.mapping_flags; in i40e_vsi_update_queue_map()
5747 memcpy(&vsi->info.queue_mapping, in i40e_vsi_update_queue_map()
5748 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); in i40e_vsi_update_queue_map()
5749 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, in i40e_vsi_update_queue_map()
5750 sizeof(vsi->info.tc_mapping)); in i40e_vsi_update_queue_map()
5758 int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset) in i40e_update_adq_vsi_queues() argument
5765 if (!vsi) in i40e_update_adq_vsi_queues()
5767 pf = vsi->back; in i40e_update_adq_vsi_queues()
5770 ctxt.seid = vsi->seid; in i40e_update_adq_vsi_queues()
5772 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset; in i40e_update_adq_vsi_queues()
5773 ctxt.uplink_seid = vsi->uplink_seid; in i40e_update_adq_vsi_queues()
5776 ctxt.info = vsi->info; in i40e_update_adq_vsi_queues()
5778 i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc, in i40e_update_adq_vsi_queues()
5780 if (vsi->reconfig_rss) { in i40e_update_adq_vsi_queues()
5781 vsi->rss_size = min_t(int, pf->alloc_rss_size, in i40e_update_adq_vsi_queues()
5782 vsi->num_queue_pairs); in i40e_update_adq_vsi_queues()
5783 ret = i40e_vsi_config_rss(vsi); in i40e_update_adq_vsi_queues()
5788 vsi->reconfig_rss = false; in i40e_update_adq_vsi_queues()
5799 i40e_vsi_update_queue_map(vsi, &ctxt); in i40e_update_adq_vsi_queues()
5800 vsi->info.valid_sections = 0; in i40e_update_adq_vsi_queues()
5818 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) in i40e_vsi_config_tc() argument
5821 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_tc()
5828 if (vsi->tc_config.enabled_tc == enabled_tc && in i40e_vsi_config_tc()
5829 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) in i40e_vsi_config_tc()
5838 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); in i40e_vsi_config_tc()
5844 enabled_tc, vsi->seid); in i40e_vsi_config_tc()
5845 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, in i40e_vsi_config_tc()
5867 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); in i40e_vsi_config_tc()
5871 enabled_tc, vsi->seid); in i40e_vsi_config_tc()
5877 ctxt.seid = vsi->seid; in i40e_vsi_config_tc()
5878 ctxt.pf_num = vsi->back->hw.pf_id; in i40e_vsi_config_tc()
5880 ctxt.uplink_seid = vsi->uplink_seid; in i40e_vsi_config_tc()
5881 ctxt.info = vsi->info; in i40e_vsi_config_tc()
5883 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc); in i40e_vsi_config_tc()
5887 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); in i40e_vsi_config_tc()
5893 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) { in i40e_vsi_config_tc()
5894 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size, in i40e_vsi_config_tc()
5895 vsi->num_queue_pairs); in i40e_vsi_config_tc()
5896 ret = i40e_vsi_config_rss(vsi); in i40e_vsi_config_tc()
5898 dev_info(&vsi->back->pdev->dev, in i40e_vsi_config_tc()
5902 vsi->reconfig_rss = false; in i40e_vsi_config_tc()
5904 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_vsi_config_tc()
5922 i40e_vsi_update_queue_map(vsi, &ctxt); in i40e_vsi_config_tc()
5923 vsi->info.valid_sections = 0; in i40e_vsi_config_tc()
5926 ret = i40e_vsi_get_bw_info(vsi); in i40e_vsi_config_tc()
5936 i40e_vsi_config_netdev_tc(vsi, enabled_tc); in i40e_vsi_config_tc()
5946 static int i40e_get_link_speed(struct i40e_vsi *vsi) in i40e_get_link_speed() argument
5948 struct i40e_pf *pf = vsi->back; in i40e_get_link_speed()
5973 static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate) in i40e_bw_bytes_to_mbits() argument
5976 dev_warn(&vsi->back->pdev->dev, in i40e_bw_bytes_to_mbits()
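i40e_bw_bytes_to_mbits() converts the mqprio max_rate (bytes per second) into the Mbit/s unit the firmware scheduler takes: 1 Mbit/s is 125000 bytes/s, and the dev_warn above fires when the rate is clamped up to a minimum usable value. A hedged standalone sketch; the 50 Mbit/s floor is an assumption here, not confirmed by the listing:

#include <stdint.h>
#include <stdio.h>

#define BW_MBPS_DIVISOR	125000ULL	/* bytes/s per Mbit/s */
#define BW_MIN_MBPS	50ULL		/* assumed minimum usable rate */

static uint64_t bw_bytes_to_mbits(uint64_t max_tx_rate_bytes)
{
	if (max_tx_rate_bytes < BW_MBPS_DIVISOR) {
		/* too small to express; clamp and warn, as the driver does */
		fprintf(stderr, "clamping to minimum of %llu Mbps\n",
			(unsigned long long)BW_MIN_MBPS);
		return BW_MIN_MBPS;
	}
	return max_tx_rate_bytes / BW_MBPS_DIVISOR;
}

int main(void)
{
	/* 125,000,000 bytes/s == 1 Gbit/s == 1000 Mbit/s */
	printf("%llu Mbps\n",
	       (unsigned long long)bw_bytes_to_mbits(125000000ULL));
	return 0;
}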
5994 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) in i40e_set_bw_limit() argument
5996 struct i40e_pf *pf = vsi->back; in i40e_set_bw_limit()
6001 speed = i40e_get_link_speed(vsi); in i40e_set_bw_limit()
6033 static void i40e_remove_queue_channels(struct i40e_vsi *vsi) in i40e_remove_queue_channels() argument
6038 struct i40e_pf *pf = vsi->back; in i40e_remove_queue_channels()
6045 vsi->current_rss_size = 0; in i40e_remove_queue_channels()
6048 if (list_empty(&vsi->ch_list)) in i40e_remove_queue_channels()
6051 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in i40e_remove_queue_channels()
6066 tx_ring = vsi->tx_rings[pf_q]; in i40e_remove_queue_channels()
6069 rx_ring = vsi->rx_rings[pf_q]; in i40e_remove_queue_channels()
6074 ret = i40e_set_bw_limit(vsi, ch->seid, 0); in i40e_remove_queue_channels()
6076 dev_info(&vsi->back->pdev->dev, in i40e_remove_queue_channels()
6088 ret = i40e_add_del_cloud_filter_big_buf(vsi, in i40e_remove_queue_channels()
6092 ret = i40e_add_del_cloud_filter(vsi, cfilter, in i40e_remove_queue_channels()
6104 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, in i40e_remove_queue_channels()
6107 dev_err(&vsi->back->pdev->dev, in i40e_remove_queue_channels()
6112 INIT_LIST_HEAD(&vsi->ch_list); in i40e_remove_queue_channels()
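i40e_remove_queue_channels() tears the channel list down while walking it, which is why the listing shows list_for_each_entry_safe(): the next pointer must be cached before the current node is released. A plain singly linked list approximation of that idiom (userspace sketch, not the kernel list API):

#include <stdio.h>
#include <stdlib.h>

struct channel {
	int seid;
	struct channel *next;
};

/* Delete-while-iterating: grab next before freeing the current node. */
static void remove_all_channels(struct channel **head)
{
	struct channel *ch = *head, *ch_tmp;

	while (ch) {
		ch_tmp = ch->next;
		printf("deleting channel seid %d\n", ch->seid);
		free(ch);
		ch = ch_tmp;
	}
	*head = NULL;	/* like the INIT_LIST_HEAD() at the end above */
}

int main(void)
{
	struct channel *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct channel *ch = malloc(sizeof(*ch));

		if (!ch)
			return 1;
		ch->seid = 100 + i;
		ch->next = head;
		head = ch;
	}
	remove_all_channels(&head);
	return 0;
}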
6122 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi) in i40e_get_max_queues_for_channel() argument
6127 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in i40e_get_max_queues_for_channel()
6149 struct i40e_vsi *vsi, bool *reconfig_rss) in i40e_validate_num_queues() argument
6157 if (vsi->current_rss_size) { in i40e_validate_num_queues()
6158 if (num_queues > vsi->current_rss_size) { in i40e_validate_num_queues()
6161 num_queues, vsi->current_rss_size); in i40e_validate_num_queues()
6163 } else if ((num_queues < vsi->current_rss_size) && in i40e_validate_num_queues()
6167 num_queues, vsi->current_rss_size); in i40e_validate_num_queues()
6178 max_ch_queues = i40e_get_max_queues_for_channel(vsi); in i40e_validate_num_queues()
6198 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size) in i40e_vsi_reconfig_rss() argument
6200 struct i40e_pf *pf = vsi->back; in i40e_vsi_reconfig_rss()
6207 if (!vsi->rss_size) in i40e_vsi_reconfig_rss()
6210 if (rss_size > vsi->rss_size) in i40e_vsi_reconfig_rss()
6213 local_rss_size = min_t(int, vsi->rss_size, rss_size); in i40e_vsi_reconfig_rss()
6214 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in i40e_vsi_reconfig_rss()
6219 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size); in i40e_vsi_reconfig_rss()
6224 if (vsi->rss_hkey_user) in i40e_vsi_reconfig_rss()
6225 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); in i40e_vsi_reconfig_rss()
6229 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); in i40e_vsi_reconfig_rss()
6241 if (!vsi->orig_rss_size) in i40e_vsi_reconfig_rss()
6242 vsi->orig_rss_size = vsi->rss_size; in i40e_vsi_reconfig_rss()
6243 vsi->current_rss_size = local_rss_size; in i40e_vsi_reconfig_rss()
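After clamping rss_size, i40e_vsi_reconfig_rss() rebuilds the lookup table with i40e_fill_rss_lut(), which spreads table slots round-robin over the active queues. A minimal userspace sketch of that fill (sizes here are illustrative; real i40e tables are 128 or 512 entries):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Round-robin LUT fill: slot i steers to queue (i % rss_size). */
static void fill_rss_lut(uint8_t *lut, uint16_t lut_size, uint16_t rss_size)
{
	for (uint16_t i = 0; i < lut_size; i++)
		lut[i] = (uint8_t)(i % rss_size);
}

int main(void)
{
	uint16_t lut_size = 16;
	uint8_t *lut = calloc(lut_size, 1);

	if (!lut)
		return 1;
	fill_rss_lut(lut, lut_size, 4);	/* spread over 4 queues */
	for (uint16_t i = 0; i < lut_size; i++)
		printf("%u ", lut[i]);
	printf("\n");
	free(lut);
	return 0;
}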
6359 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch, in i40e_channel_config_bw() argument
6371 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid, in i40e_channel_config_bw()
6374 dev_info(&vsi->back->pdev->dev, in i40e_channel_config_bw()
6376 vsi->back->hw.aq.asq_last_status, ch->seid); in i40e_channel_config_bw()
6396 struct i40e_vsi *vsi, in i40e_channel_config_tx_ring() argument
6410 ret = i40e_channel_config_bw(vsi, ch, bw_share); in i40e_channel_config_tx_ring()
6412 dev_info(&vsi->back->pdev->dev, in i40e_channel_config_tx_ring()
6427 tx_ring = vsi->tx_rings[pf_q]; in i40e_channel_config_tx_ring()
6431 rx_ring = vsi->rx_rings[pf_q]; in i40e_channel_config_tx_ring()
6450 struct i40e_vsi *vsi, in i40e_setup_hw_channel() argument
6457 ch->base_queue = vsi->next_base_queue; in i40e_setup_hw_channel()
6473 ret = i40e_channel_config_tx_ring(pf, vsi, ch); in i40e_setup_hw_channel()
6482 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs; in i40e_setup_hw_channel()
6487 vsi->next_base_queue); in i40e_setup_hw_channel()
6500 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, in i40e_setup_channel() argument
6507 if (vsi->type == I40E_VSI_MAIN) { in i40e_setup_channel()
6511 vsi->type); in i40e_setup_channel()
6516 seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_setup_channel()
6519 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type); in i40e_setup_channel()
6535 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi) in i40e_validate_and_set_switch_mode() argument
6538 struct i40e_pf *pf = vsi->back; in i40e_validate_and_set_switch_mode()
6593 int i40e_create_queue_channel(struct i40e_vsi *vsi, in i40e_create_queue_channel() argument
6596 struct i40e_pf *pf = vsi->back; in i40e_create_queue_channel()
6610 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi, in i40e_create_queue_channel()
6625 if (vsi->type == I40E_VSI_MAIN) { in i40e_create_queue_channel()
6639 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) { in i40e_create_queue_channel()
6642 vsi->cnt_q_avail, ch->num_queue_pairs); in i40e_create_queue_channel()
6647 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) { in i40e_create_queue_channel()
6648 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs); in i40e_create_queue_channel()
6657 if (!i40e_setup_channel(pf, vsi, ch)) { in i40e_create_queue_channel()
6670 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate)) in i40e_create_queue_channel()
6682 ch->parent_vsi = vsi; in i40e_create_queue_channel()
6685 vsi->cnt_q_avail -= ch->num_queue_pairs; in i40e_create_queue_channel()
6696 static int i40e_configure_queue_channels(struct i40e_vsi *vsi) in i40e_configure_queue_channels() argument
6703 vsi->tc_seid_map[0] = vsi->seid; in i40e_configure_queue_channels()
6705 if (vsi->tc_config.enabled_tc & BIT(i)) { in i40e_configure_queue_channels()
6714 vsi->tc_config.tc_info[i].qcount; in i40e_configure_queue_channels()
6716 vsi->tc_config.tc_info[i].qoffset; in i40e_configure_queue_channels()
6721 max_rate = vsi->mqprio_qopt.max_rate[i]; in i40e_configure_queue_channels()
6725 list_add_tail(&ch->list, &vsi->ch_list); in i40e_configure_queue_channels()
6727 ret = i40e_create_queue_channel(vsi, ch); in i40e_configure_queue_channels()
6729 dev_err(&vsi->back->pdev->dev, in i40e_configure_queue_channels()
6734 vsi->tc_seid_map[i] = ch->seid; in i40e_configure_queue_channels()
6739 i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true); in i40e_configure_queue_channels()
6743 i40e_remove_queue_channels(vsi); in i40e_configure_queue_channels()
6831 if (!pf->vsi[v]) in i40e_dcb_reconfigure()
6842 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); in i40e_dcb_reconfigure()
6846 pf->vsi[v]->seid); in i40e_dcb_reconfigure()
6850 i40e_vsi_map_rings_to_vectors(pf->vsi[v]); in i40e_dcb_reconfigure()
6851 if (pf->vsi[v]->netdev) in i40e_dcb_reconfigure()
6852 i40e_dcbnl_set_all(pf->vsi[v]); in i40e_dcb_reconfigure()
7076 mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu; in i40e_hw_dcb_config()
7271 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) in i40e_print_link_message() argument
7274 struct i40e_pf *pf = vsi->back; in i40e_print_link_message()
7286 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed)) in i40e_print_link_message()
7288 vsi->current_isup = isup; in i40e_print_link_message()
7289 vsi->current_speed = new_speed; in i40e_print_link_message()
7291 netdev_info(vsi->netdev, "NIC Link is Down\n"); in i40e_print_link_message()
7301 netdev_warn(vsi->netdev, in i40e_print_link_message()
7366 if (vsi->back->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
7368 if (vsi->back->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
7374 netdev_info(vsi->netdev, in i40e_print_link_message()
7393 netdev_info(vsi->netdev, in i40e_print_link_message()
7397 netdev_info(vsi->netdev, in i40e_print_link_message()
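The current_isup/current_speed checks above make i40e_print_link_message() log only on a state change. A standalone sketch of that dedupe pattern, assuming a simple (isup, speed) pair as the cached state:

#include <stdbool.h>
#include <stdio.h>

struct link_state {
	bool cur_isup;
	int  cur_speed;	/* Mbit/s; illustrative */
};

/* Remember the last reported state; stay silent when nothing changed. */
static void print_link_message(struct link_state *st, bool isup, int speed)
{
	if (st->cur_isup == isup && st->cur_speed == speed)
		return;
	st->cur_isup = isup;
	st->cur_speed = speed;
	if (!isup)
		printf("NIC Link is Down\n");
	else
		printf("NIC Link is Up, %d Mbps\n", speed);
}

int main(void)
{
	struct link_state st = { false, 0 };

	print_link_message(&st, true, 10000);	/* logs */
	print_link_message(&st, true, 10000);	/* suppressed */
	print_link_message(&st, false, 0);	/* logs */
	return 0;
}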
7408 static int i40e_up_complete(struct i40e_vsi *vsi) in i40e_up_complete() argument
7410 struct i40e_pf *pf = vsi->back; in i40e_up_complete()
7414 i40e_vsi_configure_msix(vsi); in i40e_up_complete()
7416 i40e_configure_msi_and_legacy(vsi); in i40e_up_complete()
7419 err = i40e_vsi_start_rings(vsi); in i40e_up_complete()
7423 clear_bit(__I40E_VSI_DOWN, vsi->state); in i40e_up_complete()
7424 i40e_napi_enable_all(vsi); in i40e_up_complete()
7425 i40e_vsi_enable_irq(vsi); in i40e_up_complete()
7428 (vsi->netdev)) { in i40e_up_complete()
7429 i40e_print_link_message(vsi, true); in i40e_up_complete()
7430 netif_tx_start_all_queues(vsi->netdev); in i40e_up_complete()
7431 netif_carrier_on(vsi->netdev); in i40e_up_complete()
7435 if (vsi->type == I40E_VSI_FDIR) { in i40e_up_complete()
7439 i40e_fdir_filter_restore(vsi); in i40e_up_complete()
7458 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) in i40e_vsi_reinit_locked() argument
7460 struct i40e_pf *pf = vsi->back; in i40e_vsi_reinit_locked()
7464 i40e_down(vsi); in i40e_vsi_reinit_locked()
7466 i40e_up(vsi); in i40e_vsi_reinit_locked()
7578 int i40e_up(struct i40e_vsi *vsi) in i40e_up() argument
7582 if (vsi->type == I40E_VSI_MAIN && in i40e_up()
7583 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || in i40e_up()
7584 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) in i40e_up()
7585 i40e_force_link_state(vsi->back, true); in i40e_up()
7587 err = i40e_vsi_configure(vsi); in i40e_up()
7589 err = i40e_up_complete(vsi); in i40e_up()
7598 void i40e_down(struct i40e_vsi *vsi) in i40e_down() argument
7605 if (vsi->netdev) { in i40e_down()
7606 netif_carrier_off(vsi->netdev); in i40e_down()
7607 netif_tx_disable(vsi->netdev); in i40e_down()
7609 i40e_vsi_disable_irq(vsi); in i40e_down()
7610 i40e_vsi_stop_rings(vsi); in i40e_down()
7611 if (vsi->type == I40E_VSI_MAIN && in i40e_down()
7612 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || in i40e_down()
7613 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) in i40e_down()
7614 i40e_force_link_state(vsi->back, false); in i40e_down()
7615 i40e_napi_disable_all(vsi); in i40e_down()
7617 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_down()
7618 i40e_clean_tx_ring(vsi->tx_rings[i]); in i40e_down()
7619 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_down()
7624 i40e_clean_tx_ring(vsi->xdp_rings[i]); in i40e_down()
7626 i40e_clean_rx_ring(vsi->rx_rings[i]); in i40e_down()
7636 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi, in i40e_validate_mqprio_qopt() argument
7651 dev_err(&vsi->back->pdev->dev, in i40e_validate_mqprio_qopt()
7665 if (vsi->num_queue_pairs < in i40e_validate_mqprio_qopt()
7667 dev_err(&vsi->back->pdev->dev, in i40e_validate_mqprio_qopt()
7671 if (sum_max_rate > i40e_get_link_speed(vsi)) { in i40e_validate_mqprio_qopt()
7672 dev_err(&vsi->back->pdev->dev, in i40e_validate_mqprio_qopt()
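The sum_max_rate check above is i40e_validate_mqprio_qopt() rejecting a configuration whose per-TC max rates, summed, exceed the link speed from i40e_get_link_speed(). A hedged sketch of that validation, assuming mqprio rates in bytes/s and link speed in Mbit/s (names are illustrative):

#include <stdint.h>
#include <stdio.h>

static int validate_tc_rates(const uint64_t *max_rate_bytes, int num_tc,
			     uint64_t link_speed_mbps)
{
	uint64_t sum_mbps = 0;

	/* convert each TC rate to Mbit/s and accumulate */
	for (int i = 0; i < num_tc; i++)
		sum_mbps += max_rate_bytes[i] / 125000ULL;
	if (sum_mbps > link_speed_mbps) {
		fprintf(stderr, "Invalid max tx rate specified\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	uint64_t rates[2] = { 125000000ULL, 125000000ULL };	/* 2 x 1 Gbit/s */

	/* passes against a 10 Gbit/s link */
	return validate_tc_rates(rates, 2, 10000) ? 1 : 0;
}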
7683 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi) in i40e_vsi_set_default_tc_config() argument
7689 vsi->tc_config.numtc = 1; in i40e_vsi_set_default_tc_config()
7690 vsi->tc_config.enabled_tc = 1; in i40e_vsi_set_default_tc_config()
7691 qcount = min_t(int, vsi->alloc_queue_pairs, in i40e_vsi_set_default_tc_config()
7692 i40e_pf_get_max_q_per_tc(vsi->back)); in i40e_vsi_set_default_tc_config()
7697 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_set_default_tc_config()
7699 vsi->tc_config.tc_info[i].qcount = qcount; in i40e_vsi_set_default_tc_config()
7701 vsi->tc_config.tc_info[i].qcount = 1; in i40e_vsi_set_default_tc_config()
7702 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_set_default_tc_config()
7766 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch) in i40e_reset_ch_rings() argument
7774 tx_ring = vsi->tx_rings[pf_q]; in i40e_reset_ch_rings()
7776 rx_ring = vsi->rx_rings[pf_q]; in i40e_reset_ch_rings()
7789 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi) in i40e_free_macvlan_channels() argument
7794 if (list_empty(&vsi->macvlan_list)) in i40e_free_macvlan_channels()
7797 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_free_macvlan_channels()
7801 i40e_reset_ch_rings(vsi, ch); in i40e_free_macvlan_channels()
7802 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); in i40e_free_macvlan_channels()
7803 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev); in i40e_free_macvlan_channels()
7817 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, in i40e_free_macvlan_channels()
7820 dev_err(&vsi->back->pdev->dev, in i40e_free_macvlan_channels()
7825 vsi->macvlan_cnt = 0; in i40e_free_macvlan_channels()
7834 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev, in i40e_fwd_ring_up() argument
7839 struct i40e_pf *pf = vsi->back; in i40e_fwd_ring_up()
7843 list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) { in i40e_fwd_ring_up()
7848 netdev_bind_sb_channel_queue(vsi->netdev, vdev, in i40e_fwd_ring_up()
7859 tx_ring = vsi->tx_rings[pf_q]; in i40e_fwd_ring_up()
7863 rx_ring = vsi->rx_rings[pf_q]; in i40e_fwd_ring_up()
7889 rx_ring = vsi->rx_rings[pf_q]; in i40e_fwd_ring_up()
7909 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt, in i40e_setup_macvlans() argument
7912 struct i40e_pf *pf = vsi->back; in i40e_setup_macvlans()
7920 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt) in i40e_setup_macvlans()
7923 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt); in i40e_setup_macvlans()
7935 ctxt.seid = vsi->seid; in i40e_setup_macvlans()
7936 ctxt.pf_num = vsi->back->hw.pf_id; in i40e_setup_macvlans()
7938 ctxt.uplink_seid = vsi->uplink_seid; in i40e_setup_macvlans()
7939 ctxt.info = vsi->info; in i40e_setup_macvlans()
7942 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_setup_macvlans()
7946 vsi->rss_size = max_t(u16, num_qps, qcnt); in i40e_setup_macvlans()
7947 ret = i40e_vsi_config_rss(vsi); in i40e_setup_macvlans()
7951 vsi->rss_size); in i40e_setup_macvlans()
7954 vsi->reconfig_rss = true; in i40e_setup_macvlans()
7955 dev_dbg(&vsi->back->pdev->dev, in i40e_setup_macvlans()
7956 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size); in i40e_setup_macvlans()
7957 vsi->next_base_queue = num_qps; in i40e_setup_macvlans()
7958 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps; in i40e_setup_macvlans()
7972 i40e_vsi_update_queue_map(vsi, &ctxt); in i40e_setup_macvlans()
7973 vsi->info.valid_sections = 0; in i40e_setup_macvlans()
7976 INIT_LIST_HEAD(&vsi->macvlan_list); in i40e_setup_macvlans()
7985 if (!i40e_setup_channel(pf, vsi, ch)) { in i40e_setup_macvlans()
7990 ch->parent_vsi = vsi; in i40e_setup_macvlans()
7991 vsi->cnt_q_avail -= ch->num_queue_pairs; in i40e_setup_macvlans()
7992 vsi->macvlan_cnt++; in i40e_setup_macvlans()
7993 list_add_tail(&ch->list, &vsi->macvlan_list); in i40e_setup_macvlans()
8000 i40e_free_macvlan_channels(vsi); in i40e_setup_macvlans()
8014 struct i40e_vsi *vsi = np->vsi; in i40e_fwd_add() local
8015 struct i40e_pf *pf = vsi->back; in i40e_fwd_add()
8038 if (!vsi->macvlan_cnt) { in i40e_fwd_add()
8040 set_bit(0, vsi->fwd_bitmask); in i40e_fwd_add()
8073 i40e_quiesce_vsi(vsi); in i40e_fwd_add()
8076 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan, in i40e_fwd_add()
8082 i40e_unquiesce_vsi(vsi); in i40e_fwd_add()
8084 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask, in i40e_fwd_add()
8085 vsi->macvlan_cnt); in i40e_fwd_add()
8094 set_bit(avail_macvlan, vsi->fwd_bitmask); in i40e_fwd_add()
8103 ret = i40e_fwd_ring_up(vsi, vdev, fwd); in i40e_fwd_add()
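i40e_fwd_add() tracks subordinate macvlan slots in vsi->fwd_bitmask, pairing find_first_zero_bit() with set_bit() to claim the first free slot. A userspace approximation over a plain word-sized bitmap (the kernel uses its bitmap API; this is just the allocation idea):

#include <stdint.h>
#include <stdio.h>

/* Claim the first clear bit, or -1 when all slots are in use. */
static int alloc_fwd_slot(uint64_t *bitmask, unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits; i++) {
		if (!(*bitmask & (1ULL << i))) {
			*bitmask |= 1ULL << i;
			return (int)i;
		}
	}
	return -1;
}

int main(void)
{
	uint64_t fwd_bitmask = 0;

	printf("slot %d\n", alloc_fwd_slot(&fwd_bitmask, 16));	/* 0 */
	printf("slot %d\n", alloc_fwd_slot(&fwd_bitmask, 16));	/* 1 */
	return 0;
}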
8120 static void i40e_del_all_macvlans(struct i40e_vsi *vsi) in i40e_del_all_macvlans() argument
8123 struct i40e_pf *pf = vsi->back; in i40e_del_all_macvlans()
8127 if (list_empty(&vsi->macvlan_list)) in i40e_del_all_macvlans()
8130 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_del_all_macvlans()
8137 i40e_reset_ch_rings(vsi, ch); in i40e_del_all_macvlans()
8138 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); in i40e_del_all_macvlans()
8139 netdev_unbind_sb_channel(vsi->netdev, in i40e_del_all_macvlans()
8159 struct i40e_vsi *vsi = np->vsi; in i40e_fwd_del() local
8160 struct i40e_pf *pf = vsi->back; in i40e_fwd_del()
8165 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_fwd_del()
8174 i40e_reset_ch_rings(vsi, ch); in i40e_fwd_del()
8175 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); in i40e_fwd_del()
8200 struct i40e_vsi *vsi = np->vsi; in i40e_setup_tc() local
8201 struct i40e_pf *pf = vsi->back; in i40e_setup_tc()
8209 old_queue_pairs = vsi->num_queue_pairs; in i40e_setup_tc()
8215 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); in i40e_setup_tc()
8251 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt); in i40e_setup_tc()
8254 memcpy(&vsi->mqprio_qopt, mqprio_qopt, in i40e_setup_tc()
8269 if (enabled_tc == vsi->tc_config.enabled_tc && in i40e_setup_tc()
8274 i40e_quiesce_vsi(vsi); in i40e_setup_tc()
8277 i40e_remove_queue_channels(vsi); in i40e_setup_tc()
8280 ret = i40e_vsi_config_tc(vsi, enabled_tc); in i40e_setup_tc()
8283 vsi->seid); in i40e_setup_tc()
8287 (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) { in i40e_setup_tc()
8290 vsi->tc_config.tc_info[0].qcount); in i40e_setup_tc()
8296 dev_info(&vsi->back->pdev->dev, in i40e_setup_tc()
8298 vsi->seid, vsi->tc_config.tc_info[0].qcount); in i40e_setup_tc()
8301 if (vsi->mqprio_qopt.max_rate[0]) { in i40e_setup_tc()
8302 u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, in i40e_setup_tc()
8303 vsi->mqprio_qopt.max_rate[0]); in i40e_setup_tc()
8305 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); in i40e_setup_tc()
8310 dev_dbg(&vsi->back->pdev->dev, in i40e_setup_tc()
8314 vsi->seid); in i40e_setup_tc()
8320 ret = i40e_configure_queue_channels(vsi); in i40e_setup_tc()
8322 vsi->num_queue_pairs = old_queue_pairs; in i40e_setup_tc()
8333 i40e_vsi_set_default_tc_config(vsi); in i40e_setup_tc()
8338 i40e_unquiesce_vsi(vsi); in i40e_setup_tc()
8394 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, in i40e_add_del_cloud_filter() argument
8398 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter()
8463 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, in i40e_add_del_cloud_filter_big_buf() argument
8468 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter_big_buf()
8537 ret = i40e_validate_and_set_switch_mode(vsi); in i40e_add_del_cloud_filter_big_buf()
8571 static int i40e_parse_cls_flower(struct i40e_vsi *vsi, in i40e_parse_cls_flower() argument
8578 struct i40e_pf *pf = vsi->back; in i40e_parse_cls_flower()
8778 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc, in i40e_handle_tclass() argument
8785 filter->seid = vsi->seid; in i40e_handle_tclass()
8787 } else if (vsi->tc_config.enabled_tc & BIT(tc)) { in i40e_handle_tclass()
8789 dev_err(&vsi->back->pdev->dev, in i40e_handle_tclass()
8793 if (list_empty(&vsi->ch_list)) in i40e_handle_tclass()
8795 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, in i40e_handle_tclass()
8797 if (ch->seid == vsi->tc_seid_map[tc]) in i40e_handle_tclass()
8802 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n"); in i40e_handle_tclass()
8812 static int i40e_configure_clsflower(struct i40e_vsi *vsi, in i40e_configure_clsflower() argument
8815 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); in i40e_configure_clsflower()
8817 struct i40e_pf *pf = vsi->back; in i40e_configure_clsflower()
8821 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n"); in i40e_configure_clsflower()
8836 dev_err(&vsi->back->pdev->dev, in i40e_configure_clsflower()
8841 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_configure_clsflower()
8842 dev_err(&vsi->back->pdev->dev, in i40e_configure_clsflower()
8844 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_configure_clsflower()
8845 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER; in i40e_configure_clsflower()
8854 err = i40e_parse_cls_flower(vsi, cls_flower, filter); in i40e_configure_clsflower()
8858 err = i40e_handle_tclass(vsi, tc, filter); in i40e_configure_clsflower()
8864 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true); in i40e_configure_clsflower()
8866 err = i40e_add_del_cloud_filter(vsi, filter, true); in i40e_configure_clsflower()
8893 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi, in i40e_find_cloud_filter() argument
8900 &vsi->back->cloud_filter_list, cloud_node) in i40e_find_cloud_filter()
8912 static int i40e_delete_clsflower(struct i40e_vsi *vsi, in i40e_delete_clsflower() argument
8916 struct i40e_pf *pf = vsi->back; in i40e_delete_clsflower()
8919 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie); in i40e_delete_clsflower()
8927 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false); in i40e_delete_clsflower()
8929 err = i40e_add_del_cloud_filter(vsi, filter, false); in i40e_delete_clsflower()
8958 struct i40e_vsi *vsi = np->vsi; in i40e_setup_tc_cls_flower() local
8962 return i40e_configure_clsflower(vsi, cls_flower); in i40e_setup_tc_cls_flower()
8964 return i40e_delete_clsflower(vsi, cls_flower); in i40e_setup_tc_cls_flower()
8977 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data)) in i40e_setup_tc_block_cb()
9024 struct i40e_vsi *vsi = np->vsi; in i40e_open() local
9025 struct i40e_pf *pf = vsi->back; in i40e_open()
9038 err = i40e_vsi_open(vsi); in i40e_open()
9062 static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi) in i40e_netif_set_realnum_tx_rx_queues() argument
9066 ret = netif_set_real_num_rx_queues(vsi->netdev, in i40e_netif_set_realnum_tx_rx_queues()
9067 vsi->num_queue_pairs); in i40e_netif_set_realnum_tx_rx_queues()
9071 return netif_set_real_num_tx_queues(vsi->netdev, in i40e_netif_set_realnum_tx_rx_queues()
9072 vsi->num_queue_pairs); in i40e_netif_set_realnum_tx_rx_queues()
9085 int i40e_vsi_open(struct i40e_vsi *vsi) in i40e_vsi_open() argument
9087 struct i40e_pf *pf = vsi->back; in i40e_vsi_open()
9092 err = i40e_vsi_setup_tx_resources(vsi); in i40e_vsi_open()
9095 err = i40e_vsi_setup_rx_resources(vsi); in i40e_vsi_open()
9099 err = i40e_vsi_configure(vsi); in i40e_vsi_open()
9103 if (vsi->netdev) { in i40e_vsi_open()
9105 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); in i40e_vsi_open()
9106 err = i40e_vsi_request_irq(vsi, int_name); in i40e_vsi_open()
9111 err = i40e_netif_set_realnum_tx_rx_queues(vsi); in i40e_vsi_open()
9115 } else if (vsi->type == I40E_VSI_FDIR) { in i40e_vsi_open()
9119 err = i40e_vsi_request_irq(vsi, int_name); in i40e_vsi_open()
9128 err = i40e_up_complete(vsi); in i40e_vsi_open()
9135 i40e_down(vsi); in i40e_vsi_open()
9137 i40e_vsi_free_irq(vsi); in i40e_vsi_open()
9139 i40e_vsi_free_rx_resources(vsi); in i40e_vsi_open()
9141 i40e_vsi_free_tx_resources(vsi); in i40e_vsi_open()
9142 if (vsi == pf->vsi[pf->lan_vsi]) in i40e_vsi_open()
9267 struct i40e_vsi *vsi = np->vsi; in i40e_close() local
9269 i40e_vsi_close(vsi); in i40e_close()
9349 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_do_reset() local
9351 if (vsi != NULL && in i40e_do_reset()
9353 vsi->state)) in i40e_do_reset()
9354 i40e_vsi_reinit_locked(pf->vsi[v]); in i40e_do_reset()
9362 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_do_reset() local
9364 if (vsi != NULL && in i40e_do_reset()
9366 vsi->state)) { in i40e_do_reset()
9367 set_bit(__I40E_VSI_DOWN, vsi->state); in i40e_do_reset()
9368 i40e_down(vsi); in i40e_do_reset()
9842 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); in i40e_fdir_flush_and_replay()
9883 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) in i40e_vsi_link_event() argument
9885 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_vsi_link_event()
9888 switch (vsi->type) { in i40e_vsi_link_event()
9890 if (!vsi->netdev || !vsi->netdev_registered) in i40e_vsi_link_event()
9894 netif_carrier_on(vsi->netdev); in i40e_vsi_link_event()
9895 netif_tx_wake_all_queues(vsi->netdev); in i40e_vsi_link_event()
9897 netif_carrier_off(vsi->netdev); in i40e_vsi_link_event()
9898 netif_tx_stop_all_queues(vsi->netdev); in i40e_vsi_link_event()
9934 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) in i40e_veb_link_event()
9935 i40e_vsi_link_event(pf->vsi[i], link_up); in i40e_veb_link_event()
9944 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_link_event() local
9975 (test_bit(__I40E_VSI_DOWN, vsi->state) || in i40e_link_event()
9976 new_link == netif_carrier_ok(vsi->netdev))) in i40e_link_event()
9979 i40e_print_link_message(vsi, new_link); in i40e_link_event()
9987 i40e_vsi_link_event(vsi, new_link); in i40e_link_event()
10048 if (pf->vsi[i] && pf->vsi[i]->netdev) in i40e_watchdog_subtask()
10049 i40e_update_stats(pf->vsi[i]); in i40e_watchdog_subtask()
10314 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_enable_pf_switch_lb() local
10333 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_enable_pf_switch_lb()
10350 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_disable_pf_switch_lb() local
10369 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_disable_pf_switch_lb()
10417 if (pf->vsi[v] && in i40e_reconstitute_veb()
10418 pf->vsi[v]->veb_idx == veb->idx && in i40e_reconstitute_veb()
10419 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { in i40e_reconstitute_veb()
10420 ctl_vsi = pf->vsi[v]; in i40e_reconstitute_veb()
10430 if (ctl_vsi != pf->vsi[pf->lan_vsi]) in i40e_reconstitute_veb()
10431 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_reconstitute_veb()
10454 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) in i40e_reconstitute_veb()
10457 if (pf->vsi[v]->veb_idx == veb->idx) { in i40e_reconstitute_veb()
10458 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_reconstitute_veb() local
10460 vsi->uplink_seid = veb->seid; in i40e_reconstitute_veb()
10461 ret = i40e_add_vsi(vsi); in i40e_reconstitute_veb()
10468 i40e_vsi_reset_stats(vsi); in i40e_reconstitute_veb()
10566 static int i40e_vsi_clear(struct i40e_vsi *vsi);
10574 struct i40e_vsi *vsi; in i40e_fdir_sb_setup() local
10595 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); in i40e_fdir_sb_setup()
10598 if (!vsi) { in i40e_fdir_sb_setup()
10599 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, in i40e_fdir_sb_setup()
10600 pf->vsi[pf->lan_vsi]->seid, 0); in i40e_fdir_sb_setup()
10601 if (!vsi) { in i40e_fdir_sb_setup()
10609 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); in i40e_fdir_sb_setup()
10618 struct i40e_vsi *vsi; in i40e_fdir_teardown() local
10621 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); in i40e_fdir_teardown()
10622 if (vsi) in i40e_fdir_teardown()
10623 i40e_vsi_release(vsi); in i40e_fdir_teardown()
10634 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid) in i40e_rebuild_cloud_filters() argument
10637 struct i40e_pf *pf = vsi->back; in i40e_rebuild_cloud_filters()
10648 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, in i40e_rebuild_cloud_filters()
10651 ret = i40e_add_del_cloud_filter(vsi, cfilter, true); in i40e_rebuild_cloud_filters()
10671 static int i40e_rebuild_channels(struct i40e_vsi *vsi) in i40e_rebuild_channels() argument
10676 if (list_empty(&vsi->ch_list)) in i40e_rebuild_channels()
10679 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in i40e_rebuild_channels()
10683 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch); in i40e_rebuild_channels()
10685 dev_info(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10687 vsi->uplink_seid); in i40e_rebuild_channels()
10691 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch); in i40e_rebuild_channels()
10693 dev_info(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10699 vsi->next_base_queue = vsi->next_base_queue + in i40e_rebuild_channels()
10704 if (i40e_set_bw_limit(vsi, ch->seid, in i40e_rebuild_channels()
10709 dev_dbg(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10715 ret = i40e_rebuild_cloud_filters(vsi, ch->seid); in i40e_rebuild_channels()
10717 dev_dbg(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10730 static void i40e_clean_xps_state(struct i40e_vsi *vsi) in i40e_clean_xps_state() argument
10734 if (vsi->tx_rings) in i40e_clean_xps_state()
10735 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_clean_xps_state()
10736 if (vsi->tx_rings[i]) in i40e_clean_xps_state()
10738 vsi->tx_rings[i]->state); in i40e_clean_xps_state()
10765 if (pf->vsi[v]) { in i40e_prep_for_reset()
10766 i40e_clean_xps_state(pf->vsi[v]); in i40e_prep_for_reset()
10767 pf->vsi[v]->seid = 0; in i40e_prep_for_reset()
10878 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_rebuild() local
10886 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); in i40e_rebuild()
11019 if (vsi->uplink_seid != pf->mac_seid) { in i40e_rebuild()
11043 vsi->uplink_seid = pf->mac_seid; in i40e_rebuild()
11054 if (vsi->uplink_seid == pf->mac_seid) { in i40e_rebuild()
11057 ret = i40e_add_vsi(vsi); in i40e_rebuild()
11065 if (vsi->mqprio_qopt.max_rate[0]) { in i40e_rebuild()
11066 u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, in i40e_rebuild()
11067 vsi->mqprio_qopt.max_rate[0]); in i40e_rebuild()
11070 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); in i40e_rebuild()
11076 dev_dbg(&vsi->back->pdev->dev, in i40e_rebuild()
11080 vsi->seid); in i40e_rebuild()
11083 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid); in i40e_rebuild()
11090 ret = i40e_rebuild_channels(vsi); in i40e_rebuild()
11323 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]); in i40e_service_task()
11332 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], in i40e_service_task()
11339 pf->vsi[pf->lan_vsi]); in i40e_service_task()
11380 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) in i40e_set_num_rings_in_vsi() argument
11382 struct i40e_pf *pf = vsi->back; in i40e_set_num_rings_in_vsi()
11384 switch (vsi->type) { in i40e_set_num_rings_in_vsi()
11386 vsi->alloc_queue_pairs = pf->num_lan_qps; in i40e_set_num_rings_in_vsi()
11387 if (!vsi->num_tx_desc) in i40e_set_num_rings_in_vsi()
11388 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11390 if (!vsi->num_rx_desc) in i40e_set_num_rings_in_vsi()
11391 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11394 vsi->num_q_vectors = pf->num_lan_msix; in i40e_set_num_rings_in_vsi()
11396 vsi->num_q_vectors = 1; in i40e_set_num_rings_in_vsi()
11401 vsi->alloc_queue_pairs = 1; in i40e_set_num_rings_in_vsi()
11402 vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT, in i40e_set_num_rings_in_vsi()
11404 vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT, in i40e_set_num_rings_in_vsi()
11406 vsi->num_q_vectors = pf->num_fdsb_msix; in i40e_set_num_rings_in_vsi()
11410 vsi->alloc_queue_pairs = pf->num_vmdq_qps; in i40e_set_num_rings_in_vsi()
11411 if (!vsi->num_tx_desc) in i40e_set_num_rings_in_vsi()
11412 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11414 if (!vsi->num_rx_desc) in i40e_set_num_rings_in_vsi()
11415 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11417 vsi->num_q_vectors = pf->num_vmdq_msix; in i40e_set_num_rings_in_vsi()
11421 vsi->alloc_queue_pairs = pf->num_vf_qps; in i40e_set_num_rings_in_vsi()
11422 if (!vsi->num_tx_desc) in i40e_set_num_rings_in_vsi()
11423 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11425 if (!vsi->num_rx_desc) in i40e_set_num_rings_in_vsi()
11426 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11436 vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS; in i40e_set_num_rings_in_vsi()
11437 vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS; in i40e_set_num_rings_in_vsi()
11451 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) in i40e_vsi_alloc_arrays() argument
11458 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * in i40e_vsi_alloc_arrays()
11459 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2); in i40e_vsi_alloc_arrays()
11460 vsi->tx_rings = kzalloc(size, GFP_KERNEL); in i40e_vsi_alloc_arrays()
11461 if (!vsi->tx_rings) in i40e_vsi_alloc_arrays()
11463 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs; in i40e_vsi_alloc_arrays()
11464 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_vsi_alloc_arrays()
11465 vsi->xdp_rings = next_rings; in i40e_vsi_alloc_arrays()
11466 next_rings += vsi->alloc_queue_pairs; in i40e_vsi_alloc_arrays()
11468 vsi->rx_rings = next_rings; in i40e_vsi_alloc_arrays()
11472 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors; in i40e_vsi_alloc_arrays()
11473 vsi->q_vectors = kzalloc(size, GFP_KERNEL); in i40e_vsi_alloc_arrays()
11474 if (!vsi->q_vectors) { in i40e_vsi_alloc_arrays()
11482 kfree(vsi->tx_rings); in i40e_vsi_alloc_arrays()
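The i40e_vsi_alloc_arrays() lines above size one kzalloc() for two or three ring-pointer arrays and hand out consecutive sub-ranges: tx_rings first, then xdp_rings when XDP is enabled, then rx_rings, so a single kfree() of tx_rings releases everything. A standalone sketch of that carve-up (struct names are stand-ins):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ring { int id; };	/* stand-in for struct i40e_ring */

struct vsi_rings {
	struct ring **tx_rings;		/* base of the single allocation */
	struct ring **xdp_rings;	/* NULL when XDP is off */
	struct ring **rx_rings;
};

static int alloc_ring_arrays(struct vsi_rings *v, int nqp, bool xdp)
{
	struct ring **next;

	v->tx_rings = calloc((size_t)nqp * (xdp ? 3 : 2), sizeof(*v->tx_rings));
	if (!v->tx_rings)
		return -1;
	next = v->tx_rings + nqp;	/* xdp (if any) follows tx */
	if (xdp) {
		v->xdp_rings = next;
		next += nqp;
	} else {
		v->xdp_rings = NULL;
	}
	v->rx_rings = next;		/* rx comes last */
	return 0;
}

int main(void)
{
	struct vsi_rings v;

	if (alloc_ring_arrays(&v, 4, true))
		return 1;
	printf("tx=%p xdp=%p rx=%p\n", (void *)v.tx_rings,
	       (void *)v.xdp_rings, (void *)v.rx_rings);
	free(v.tx_rings);	/* one free releases all three views */
	return 0;
}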
11497 struct i40e_vsi *vsi; in i40e_vsi_mem_alloc() local
11511 while (i < pf->num_alloc_vsi && pf->vsi[i]) in i40e_vsi_mem_alloc()
11515 while (i < pf->next_vsi && pf->vsi[i]) in i40e_vsi_mem_alloc()
11519 if (i < pf->num_alloc_vsi && !pf->vsi[i]) { in i40e_vsi_mem_alloc()
11527 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); in i40e_vsi_mem_alloc()
11528 if (!vsi) { in i40e_vsi_mem_alloc()
11532 vsi->type = type; in i40e_vsi_mem_alloc()
11533 vsi->back = pf; in i40e_vsi_mem_alloc()
11534 set_bit(__I40E_VSI_DOWN, vsi->state); in i40e_vsi_mem_alloc()
11535 vsi->flags = 0; in i40e_vsi_mem_alloc()
11536 vsi->idx = vsi_idx; in i40e_vsi_mem_alloc()
11537 vsi->int_rate_limit = 0; in i40e_vsi_mem_alloc()
11538 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? in i40e_vsi_mem_alloc()
11540 vsi->netdev_registered = false; in i40e_vsi_mem_alloc()
11541 vsi->work_limit = I40E_DEFAULT_IRQ_WORK; in i40e_vsi_mem_alloc()
11542 hash_init(vsi->mac_filter_hash); in i40e_vsi_mem_alloc()
11543 vsi->irqs_ready = false; in i40e_vsi_mem_alloc()
11546 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); in i40e_vsi_mem_alloc()
11547 if (!vsi->af_xdp_zc_qps) in i40e_vsi_mem_alloc()
11551 ret = i40e_set_num_rings_in_vsi(vsi); in i40e_vsi_mem_alloc()
11555 ret = i40e_vsi_alloc_arrays(vsi, true); in i40e_vsi_mem_alloc()
11560 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); in i40e_vsi_mem_alloc()
11563 spin_lock_init(&vsi->mac_filter_hash_lock); in i40e_vsi_mem_alloc()
11564 pf->vsi[vsi_idx] = vsi; in i40e_vsi_mem_alloc()
11569 bitmap_free(vsi->af_xdp_zc_qps); in i40e_vsi_mem_alloc()
11571 kfree(vsi); in i40e_vsi_mem_alloc()
11585 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) in i40e_vsi_free_arrays() argument
11589 kfree(vsi->q_vectors); in i40e_vsi_free_arrays()
11590 vsi->q_vectors = NULL; in i40e_vsi_free_arrays()
11592 kfree(vsi->tx_rings); in i40e_vsi_free_arrays()
11593 vsi->tx_rings = NULL; in i40e_vsi_free_arrays()
11594 vsi->rx_rings = NULL; in i40e_vsi_free_arrays()
11595 vsi->xdp_rings = NULL; in i40e_vsi_free_arrays()
11603 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi) in i40e_clear_rss_config_user() argument
11605 if (!vsi) in i40e_clear_rss_config_user()
11608 kfree(vsi->rss_hkey_user); in i40e_clear_rss_config_user()
11609 vsi->rss_hkey_user = NULL; in i40e_clear_rss_config_user()
11611 kfree(vsi->rss_lut_user); in i40e_clear_rss_config_user()
11612 vsi->rss_lut_user = NULL; in i40e_clear_rss_config_user()
11619 static int i40e_vsi_clear(struct i40e_vsi *vsi) in i40e_vsi_clear() argument
11623 if (!vsi) in i40e_vsi_clear()
11626 if (!vsi->back) in i40e_vsi_clear()
11628 pf = vsi->back; in i40e_vsi_clear()
11631 if (!pf->vsi[vsi->idx]) { in i40e_vsi_clear()
11633 vsi->idx, vsi->idx, vsi->type); in i40e_vsi_clear()
11637 if (pf->vsi[vsi->idx] != vsi) { in i40e_vsi_clear()
11640 pf->vsi[vsi->idx]->idx, in i40e_vsi_clear()
11641 pf->vsi[vsi->idx]->type, in i40e_vsi_clear()
11642 vsi->idx, vsi->type); in i40e_vsi_clear()
11647 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); in i40e_vsi_clear()
11648 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); in i40e_vsi_clear()
11650 bitmap_free(vsi->af_xdp_zc_qps); in i40e_vsi_clear()
11651 i40e_vsi_free_arrays(vsi, true); in i40e_vsi_clear()
11652 i40e_clear_rss_config_user(vsi); in i40e_vsi_clear()
11654 pf->vsi[vsi->idx] = NULL; in i40e_vsi_clear()
11655 if (vsi->idx < pf->next_vsi) in i40e_vsi_clear()
11656 pf->next_vsi = vsi->idx; in i40e_vsi_clear()
11661 kfree(vsi); in i40e_vsi_clear()
11670 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) in i40e_vsi_clear_rings() argument
11674 if (vsi->tx_rings && vsi->tx_rings[0]) { in i40e_vsi_clear_rings()
11675 for (i = 0; i < vsi->alloc_queue_pairs; i++) { in i40e_vsi_clear_rings()
11676 kfree_rcu(vsi->tx_rings[i], rcu); in i40e_vsi_clear_rings()
11677 WRITE_ONCE(vsi->tx_rings[i], NULL); in i40e_vsi_clear_rings()
11678 WRITE_ONCE(vsi->rx_rings[i], NULL); in i40e_vsi_clear_rings()
11679 if (vsi->xdp_rings) in i40e_vsi_clear_rings()
11680 WRITE_ONCE(vsi->xdp_rings[i], NULL); in i40e_vsi_clear_rings()
11689 static int i40e_alloc_rings(struct i40e_vsi *vsi) in i40e_alloc_rings() argument
11691 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2; in i40e_alloc_rings()
11692 struct i40e_pf *pf = vsi->back; in i40e_alloc_rings()
11696 for (i = 0; i < vsi->alloc_queue_pairs; i++) { in i40e_alloc_rings()
11703 ring->reg_idx = vsi->base_queue + i; in i40e_alloc_rings()
11705 ring->vsi = vsi; in i40e_alloc_rings()
11706 ring->netdev = vsi->netdev; in i40e_alloc_rings()
11708 ring->count = vsi->num_tx_desc; in i40e_alloc_rings()
11711 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) in i40e_alloc_rings()
11714 WRITE_ONCE(vsi->tx_rings[i], ring++); in i40e_alloc_rings()
11716 if (!i40e_enabled_xdp_vsi(vsi)) in i40e_alloc_rings()
11719 ring->queue_index = vsi->alloc_queue_pairs + i; in i40e_alloc_rings()
11720 ring->reg_idx = vsi->base_queue + ring->queue_index; in i40e_alloc_rings()
11722 ring->vsi = vsi; in i40e_alloc_rings()
11725 ring->count = vsi->num_tx_desc; in i40e_alloc_rings()
11728 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) in i40e_alloc_rings()
11732 WRITE_ONCE(vsi->xdp_rings[i], ring++); in i40e_alloc_rings()
11736 ring->reg_idx = vsi->base_queue + i; in i40e_alloc_rings()
11738 ring->vsi = vsi; in i40e_alloc_rings()
11739 ring->netdev = vsi->netdev; in i40e_alloc_rings()
11741 ring->count = vsi->num_rx_desc; in i40e_alloc_rings()
11745 WRITE_ONCE(vsi->rx_rings[i], ring); in i40e_alloc_rings()
11751 i40e_vsi_clear_rings(vsi); in i40e_alloc_rings()
12012 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) in i40e_vsi_alloc_q_vector() argument
12021 q_vector->vsi = vsi; in i40e_vsi_alloc_q_vector()
12025 if (vsi->netdev) in i40e_vsi_alloc_q_vector()
12026 netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll); in i40e_vsi_alloc_q_vector()
12029 vsi->q_vectors[v_idx] = q_vector; in i40e_vsi_alloc_q_vector()
12041 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) in i40e_vsi_alloc_q_vectors() argument
12043 struct i40e_pf *pf = vsi->back; in i40e_vsi_alloc_q_vectors()
12048 num_q_vectors = vsi->num_q_vectors; in i40e_vsi_alloc_q_vectors()
12049 else if (vsi == pf->vsi[pf->lan_vsi]) in i40e_vsi_alloc_q_vectors()
12055 err = i40e_vsi_alloc_q_vector(vsi, v_idx); in i40e_vsi_alloc_q_vectors()
12064 i40e_free_q_vector(vsi, v_idx); in i40e_vsi_alloc_q_vectors()
12152 if (pf->vsi[i]) { in i40e_restore_interrupt_scheme()
12153 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
12156 i40e_vsi_map_rings_to_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
12171 if (pf->vsi[i]) in i40e_restore_interrupt_scheme()
12172 i40e_vsi_free_q_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
12268 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, in i40e_get_rss_aq() argument
12271 struct i40e_pf *pf = vsi->back; in i40e_get_rss_aq()
12276 ret = i40e_aq_get_rss_key(hw, vsi->id, in i40e_get_rss_aq()
12289 bool pf_lut = vsi->type == I40E_VSI_MAIN; in i40e_get_rss_aq()
12291 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); in i40e_get_rss_aq()
12314 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, in i40e_config_rss_reg() argument
12317 struct i40e_pf *pf = vsi->back; in i40e_config_rss_reg()
12319 u16 vf_id = vsi->vf_id; in i40e_config_rss_reg()
12326 if (vsi->type == I40E_VSI_MAIN) { in i40e_config_rss_reg()
12329 } else if (vsi->type == I40E_VSI_SRIOV) { in i40e_config_rss_reg()
12340 if (vsi->type == I40E_VSI_MAIN) { in i40e_config_rss_reg()
12345 } else if (vsi->type == I40E_VSI_SRIOV) { in i40e_config_rss_reg()
12368 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed, in i40e_get_rss_reg() argument
12371 struct i40e_pf *pf = vsi->back; in i40e_get_rss_reg()
12402 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) in i40e_config_rss() argument
12404 struct i40e_pf *pf = vsi->back; in i40e_config_rss()
12407 return i40e_config_rss_aq(vsi, seed, lut, lut_size); in i40e_config_rss()
12409 return i40e_config_rss_reg(vsi, seed, lut, lut_size); in i40e_config_rss()
12421 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) in i40e_get_rss() argument
12423 struct i40e_pf *pf = vsi->back; in i40e_get_rss()
12426 return i40e_get_rss_aq(vsi, seed, lut, lut_size); in i40e_get_rss()
12428 return i40e_get_rss_reg(vsi, seed, lut, lut_size); in i40e_get_rss()
12453 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_pf_config_rss() local
12477 if (!vsi->rss_size) { in i40e_pf_config_rss()
12484 qcount = vsi->num_queue_pairs / in i40e_pf_config_rss()
12485 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1); in i40e_pf_config_rss()
12486 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_pf_config_rss()
12488 if (!vsi->rss_size) in i40e_pf_config_rss()
12491 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in i40e_pf_config_rss()
12496 if (vsi->rss_lut_user) in i40e_pf_config_rss()
12497 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in i40e_pf_config_rss()
12499 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); in i40e_pf_config_rss()
12504 if (vsi->rss_hkey_user) in i40e_pf_config_rss()
12505 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); in i40e_pf_config_rss()
12508 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); in i40e_pf_config_rss()
12525 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_reconfig_rss_queues() local
12534 if (queue_count != vsi->num_queue_pairs) { in i40e_reconfig_rss_queues()
12537 vsi->req_queue_pairs = queue_count; in i40e_reconfig_rss_queues()
12549 if (queue_count < vsi->rss_size) { in i40e_reconfig_rss_queues()
12550 i40e_clear_rss_config_user(vsi); in i40e_reconfig_rss_queues()
12556 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc; in i40e_reconfig_rss_queues()
12557 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_reconfig_rss_queues()
12562 vsi->req_queue_pairs, pf->rss_size_max); in i40e_reconfig_rss_queues()
12993 static void i40e_clear_rss_lut(struct i40e_vsi *vsi) in i40e_clear_rss_lut() argument
12995 struct i40e_pf *pf = vsi->back; in i40e_clear_rss_lut()
12997 u16 vf_id = vsi->vf_id; in i40e_clear_rss_lut()
13000 if (vsi->type == I40E_VSI_MAIN) { in i40e_clear_rss_lut()
13003 } else if (vsi->type == I40E_VSI_SRIOV) { in i40e_clear_rss_lut()
13016 static int i40e_set_loopback(struct i40e_vsi *vsi, bool ena) in i40e_set_loopback() argument
13018 bool if_running = netif_running(vsi->netdev) && in i40e_set_loopback()
13019 !test_and_set_bit(__I40E_VSI_DOWN, vsi->state); in i40e_set_loopback()
13023 i40e_down(vsi); in i40e_set_loopback()
13025 ret = i40e_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); in i40e_set_loopback()
13027 netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); in i40e_set_loopback()
13029 i40e_up(vsi); in i40e_set_loopback()
13044 struct i40e_vsi *vsi = np->vsi; in i40e_set_features() local
13045 struct i40e_pf *pf = vsi->back; in i40e_set_features()
13052 i40e_clear_rss_lut(vsi); in i40e_set_features()
13055 i40e_vlan_stripping_enable(vsi); in i40e_set_features()
13057 i40e_vlan_stripping_disable(vsi); in i40e_set_features()
13066 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt) in i40e_set_features()
13067 i40e_del_all_macvlans(vsi); in i40e_set_features()
13075 return i40e_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK)); in i40e_set_features()
13085 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_udp_tunnel_set_port()
13110 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_udp_tunnel_unset_port()
13128 struct i40e_pf *pf = np->vsi->back; in i40e_get_phys_port_id()
13157 struct i40e_pf *pf = np->vsi->back; in i40e_ndo_fdb_add()
13212 struct i40e_vsi *vsi = np->vsi; in i40e_ndo_bridge_setlink() local
13213 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_setlink()
13219 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) in i40e_ndo_bridge_setlink()
13224 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_ndo_bridge_setlink()
13245 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_ndo_bridge_setlink()
13246 vsi->tc_config.enabled_tc); in i40e_ndo_bridge_setlink()
13289 struct i40e_vsi *vsi = np->vsi; in i40e_ndo_bridge_getlink() local
13290 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_getlink()
13295 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) in i40e_ndo_bridge_getlink()
13300 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_ndo_bridge_getlink()
13375 static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, in i40e_xdp_setup() argument
13378 int frame_size = i40e_max_vsi_frame_size(vsi, prog); in i40e_xdp_setup()
13379 struct i40e_pf *pf = vsi->back; in i40e_xdp_setup()
13389 if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) { in i40e_xdp_setup()
13395 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog); in i40e_xdp_setup()
13399 old_prog = xchg(&vsi->xdp_prog, prog); in i40e_xdp_setup()
13403 xdp_features_clear_redirect_target(vsi->netdev); in i40e_xdp_setup()
13410 if (!i40e_enabled_xdp_vsi(vsi) && prog) { in i40e_xdp_setup()
13411 if (i40e_realloc_rx_bi_zc(vsi, true)) in i40e_xdp_setup()
13413 } else if (i40e_enabled_xdp_vsi(vsi) && !prog) { in i40e_xdp_setup()
13414 if (i40e_realloc_rx_bi_zc(vsi, false)) in i40e_xdp_setup()
13418 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_xdp_setup()
13419 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); in i40e_xdp_setup()
13428 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_xdp_setup()
13429 if (vsi->xdp_rings[i]->xsk_pool) in i40e_xdp_setup()
13430 (void)i40e_xsk_wakeup(vsi->netdev, i, in i40e_xdp_setup()
13432 xdp_features_set_redirect_target(vsi->netdev, true); in i40e_xdp_setup()
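i40e_xdp_setup() swaps the VSI-level program pointer with xchg(), then publishes the new pointer to every RX ring with WRITE_ONCE(), and only needs a reset when XDP flips between enabled and disabled. A userspace sketch of that swap-then-publish shape using C11 atomics as a rough stand-in for the kernel primitives (an approximation, not the driver's semantics):

#include <stdatomic.h>
#include <stdio.h>

struct bpf_prog;	/* opaque stand-in for the kernel's type */

#define NUM_QP 4

static _Atomic(struct bpf_prog *) vsi_prog;
static _Atomic(struct bpf_prog *) ring_prog[NUM_QP];

static struct bpf_prog *xdp_setup(struct bpf_prog *prog, int *need_reset)
{
	struct bpf_prog *old;

	/* reset only when enabled/disabled state actually flips */
	*need_reset = (atomic_load(&vsi_prog) != NULL) != (prog != NULL);
	old = atomic_exchange(&vsi_prog, prog);	/* like xchg() */
	for (int i = 0; i < NUM_QP; i++)
		atomic_store(&ring_prog[i], prog);	/* like WRITE_ONCE() */
	return old;	/* caller drops the old program */
}

int main(void)
{
	static struct { int dummy; } fake;	/* placeholder object */
	int need_reset;

	xdp_setup((struct bpf_prog *)&fake, &need_reset);
	printf("need_reset=%d\n", need_reset);
	return 0;
}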
13444 static int i40e_enter_busy_conf(struct i40e_vsi *vsi) in i40e_enter_busy_conf() argument
13446 struct i40e_pf *pf = vsi->back; in i40e_enter_busy_conf()
13463 static void i40e_exit_busy_conf(struct i40e_vsi *vsi) in i40e_exit_busy_conf() argument
13465 struct i40e_pf *pf = vsi->back; in i40e_exit_busy_conf()
13475 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair) in i40e_queue_pair_reset_stats() argument
13477 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0, in i40e_queue_pair_reset_stats()
13478 sizeof(vsi->rx_rings[queue_pair]->rx_stats)); in i40e_queue_pair_reset_stats()
13479 memset(&vsi->tx_rings[queue_pair]->stats, 0, in i40e_queue_pair_reset_stats()
13480 sizeof(vsi->tx_rings[queue_pair]->stats)); in i40e_queue_pair_reset_stats()
13481 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_queue_pair_reset_stats()
13482 memset(&vsi->xdp_rings[queue_pair]->stats, 0, in i40e_queue_pair_reset_stats()
13483 sizeof(vsi->xdp_rings[queue_pair]->stats)); in i40e_queue_pair_reset_stats()
13492 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) in i40e_queue_pair_clean_rings() argument
13494 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); in i40e_queue_pair_clean_rings()
13495 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_queue_pair_clean_rings()
13500 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); in i40e_queue_pair_clean_rings()
13502 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); in i40e_queue_pair_clean_rings()
13511 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair, in i40e_queue_pair_toggle_napi() argument
13514 struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; in i40e_queue_pair_toggle_napi()
13517 if (!vsi->netdev) in i40e_queue_pair_toggle_napi()
13537 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair, in i40e_queue_pair_toggle_rings() argument
13540 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_toggle_rings()
13543 pf_q = vsi->base_queue + queue_pair; in i40e_queue_pair_toggle_rings()
13544 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, in i40e_queue_pair_toggle_rings()
13549 vsi->seid, pf_q, (enable ? "en" : "dis")); in i40e_queue_pair_toggle_rings()
13558 vsi->seid, pf_q, (enable ? "en" : "dis")); in i40e_queue_pair_toggle_rings()
13568 if (!i40e_enabled_xdp_vsi(vsi)) in i40e_queue_pair_toggle_rings()
13571 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_queue_pair_toggle_rings()
13572 pf_q + vsi->alloc_queue_pairs, in i40e_queue_pair_toggle_rings()
13577 vsi->seid, pf_q, (enable ? "en" : "dis")); in i40e_queue_pair_toggle_rings()
13588 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair) in i40e_queue_pair_enable_irq() argument
13590 struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; in i40e_queue_pair_enable_irq()
13591 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_enable_irq()
13596 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx); in i40e_queue_pair_enable_irq()
13608 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair) in i40e_queue_pair_disable_irq() argument
13610 struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; in i40e_queue_pair_disable_irq()
13611 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_disable_irq()
13621 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx; in i40e_queue_pair_disable_irq()
13642 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) in i40e_queue_pair_disable() argument
13646 err = i40e_enter_busy_conf(vsi); in i40e_queue_pair_disable()
13650 i40e_queue_pair_disable_irq(vsi, queue_pair); in i40e_queue_pair_disable()
13651 i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); in i40e_queue_pair_disable()
13652 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); in i40e_queue_pair_disable()
13653 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); in i40e_queue_pair_disable()
13654 i40e_queue_pair_clean_rings(vsi, queue_pair); in i40e_queue_pair_disable()
13655 i40e_queue_pair_reset_stats(vsi, queue_pair); in i40e_queue_pair_disable()
13667 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair) in i40e_queue_pair_enable() argument
13671 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]); in i40e_queue_pair_enable()
13675 if (i40e_enabled_xdp_vsi(vsi)) { in i40e_queue_pair_enable()
13676 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]); in i40e_queue_pair_enable()
13681 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]); in i40e_queue_pair_enable()
13685 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */); in i40e_queue_pair_enable()
13686 i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */); in i40e_queue_pair_enable()
13687 i40e_queue_pair_enable_irq(vsi, queue_pair); in i40e_queue_pair_enable()
13689 i40e_exit_busy_conf(vsi); in i40e_queue_pair_enable()
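The i40e_queue_pair_disable()/_enable() lines above encode a strict ordering: interrupts and NAPI are quiesced before the hardware rings, and re-armed only after the rings are reconfigured and running again. A stub-based sketch of that sequence (the step names map to the driver calls shown above; nothing here touches real hardware):

#include <stdio.h>

static void step(const char *what) { printf("%s\n", what); }

static void queue_pair_disable(void)
{
	step("disable irq");		/* i40e_queue_pair_disable_irq()  */
	step("napi off");		/* i40e_queue_pair_toggle_napi()  */
	step("rings off");		/* i40e_queue_pair_toggle_rings() */
	step("clean rings + reset stats");
}

static void queue_pair_enable(void)
{
	step("configure tx/xdp/rx rings");
	step("rings on");
	step("napi on");
	step("enable irq");		/* last, once traffic can flow */
}

int main(void)
{
	queue_pair_disable();
	queue_pair_enable();
	return 0;
}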
13703 struct i40e_vsi *vsi = np->vsi; in i40e_xdp() local
13705 if (vsi->type != I40E_VSI_MAIN) in i40e_xdp()
13710 return i40e_xdp_setup(vsi, xdp->prog, xdp->extack); in i40e_xdp()
13712 return i40e_xsk_pool_setup(vsi, xdp->xsk.pool, in i40e_xdp()
13764 static int i40e_config_netdev(struct i40e_vsi *vsi) in i40e_config_netdev() argument
13766 struct i40e_pf *pf = vsi->back; in i40e_config_netdev()
13777 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); in i40e_config_netdev()
13781 vsi->netdev = netdev; in i40e_config_netdev()
13783 np->vsi = vsi; in i40e_config_netdev()
13851 if (vsi->type == I40E_VSI_MAIN) { in i40e_config_netdev()
13864 i40e_rm_default_mac_filter(vsi, mac_addr); in i40e_config_netdev()
13865 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13866 i40e_add_mac_filter(vsi, mac_addr); in i40e_config_netdev()
13867 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13882 pf->vsi[pf->lan_vsi]->netdev->name); in i40e_config_netdev()
13885 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13886 i40e_add_mac_filter(vsi, mac_addr); in i40e_config_netdev()
13887 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13904 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13905 i40e_add_mac_filter(vsi, broadcast); in i40e_config_netdev()
13906 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13917 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc); in i40e_config_netdev()
13936 static void i40e_vsi_delete(struct i40e_vsi *vsi) in i40e_vsi_delete() argument
13939 if (vsi == vsi->back->vsi[vsi->back->lan_vsi]) in i40e_vsi_delete()
13942 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); in i40e_vsi_delete()
13951 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) in i40e_is_vsi_uplink_mode_veb() argument
13954 struct i40e_pf *pf = vsi->back; in i40e_is_vsi_uplink_mode_veb()
13957 if (vsi->veb_idx >= I40E_MAX_VEB) in i40e_is_vsi_uplink_mode_veb()
13960 veb = pf->veb[vsi->veb_idx]; in i40e_is_vsi_uplink_mode_veb()
13986 static int i40e_add_vsi(struct i40e_vsi *vsi) in i40e_add_vsi() argument
13989 struct i40e_pf *pf = vsi->back; in i40e_add_vsi()
14000 switch (vsi->type) { in i40e_add_vsi()
14020 vsi->info = ctxt.info; in i40e_add_vsi()
14021 vsi->info.valid_sections = 0; in i40e_add_vsi()
14023 vsi->seid = ctxt.seid; in i40e_add_vsi()
14024 vsi->id = ctxt.vsi_number; in i40e_add_vsi()
14060 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); in i40e_add_vsi()
14072 i40e_vsi_update_queue_map(vsi, &ctxt); in i40e_add_vsi()
14073 vsi->info.valid_sections = 0; in i40e_add_vsi()
14081 ret = i40e_vsi_config_tc(vsi, enabled_tc); in i40e_add_vsi()
14099 ctxt.uplink_seid = vsi->uplink_seid; in i40e_add_vsi()
14103 (i40e_is_vsi_uplink_mode_veb(vsi))) { in i40e_add_vsi()
14109 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); in i40e_add_vsi()
14115 ctxt.uplink_seid = vsi->uplink_seid; in i40e_add_vsi()
14122 if (i40e_is_vsi_uplink_mode_veb(vsi)) { in i40e_add_vsi()
14130 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); in i40e_add_vsi()
14135 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; in i40e_add_vsi()
14136 ctxt.uplink_seid = vsi->uplink_seid; in i40e_add_vsi()
14143 if (i40e_is_vsi_uplink_mode_veb(vsi)) { in i40e_add_vsi()
14150 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_add_vsi()
14160 if (pf->vf[vsi->vf_id].spoofchk) { in i40e_add_vsi()
14168 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); in i40e_add_vsi()
14179 if (vsi->type != I40E_VSI_MAIN) { in i40e_add_vsi()
14182 dev_info(&vsi->back->pdev->dev, in i40e_add_vsi()
14190 vsi->info = ctxt.info; in i40e_add_vsi()
14191 vsi->info.valid_sections = 0; in i40e_add_vsi()
14192 vsi->seid = ctxt.seid; in i40e_add_vsi()
14193 vsi->id = ctxt.vsi_number; in i40e_add_vsi()
14196 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_add_vsi()
14197 vsi->active_filters = 0; in i40e_add_vsi()
14199 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_add_vsi()
14203 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_add_vsi()
14204 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_add_vsi()
14207 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_add_vsi()
14212 ret = i40e_vsi_get_bw_info(vsi); in i40e_add_vsi()
14232 int i40e_vsi_release(struct i40e_vsi *vsi) in i40e_vsi_release() argument
14241 pf = vsi->back; in i40e_vsi_release()
14244 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { in i40e_vsi_release()
14246 vsi->seid, vsi->uplink_seid); in i40e_vsi_release()
14249 if (vsi == pf->vsi[pf->lan_vsi] && in i40e_vsi_release()
14254 set_bit(__I40E_VSI_RELEASING, vsi->state); in i40e_vsi_release()
14255 uplink_seid = vsi->uplink_seid; in i40e_vsi_release()
14256 if (vsi->type != I40E_VSI_SRIOV) { in i40e_vsi_release()
14257 if (vsi->netdev_registered) { in i40e_vsi_release()
14258 vsi->netdev_registered = false; in i40e_vsi_release()
14259 if (vsi->netdev) { in i40e_vsi_release()
14261 unregister_netdev(vsi->netdev); in i40e_vsi_release()
14264 i40e_vsi_close(vsi); in i40e_vsi_release()
14266 i40e_vsi_disable_irq(vsi); in i40e_vsi_release()
14269 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_release()
14272 if (vsi->netdev) { in i40e_vsi_release()
14273 __dev_uc_unsync(vsi->netdev, NULL); in i40e_vsi_release()
14274 __dev_mc_unsync(vsi->netdev, NULL); in i40e_vsi_release()
14278 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) in i40e_vsi_release()
14279 __i40e_del_filter(vsi, f); in i40e_vsi_release()
14281 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_release()
14283 i40e_sync_vsi_filters(vsi); in i40e_vsi_release()
14285 i40e_vsi_delete(vsi); in i40e_vsi_release()
14286 i40e_vsi_free_q_vectors(vsi); in i40e_vsi_release()
14287 if (vsi->netdev) { in i40e_vsi_release()
14288 free_netdev(vsi->netdev); in i40e_vsi_release()
14289 vsi->netdev = NULL; in i40e_vsi_release()
14291 i40e_vsi_clear_rings(vsi); in i40e_vsi_release()
14292 i40e_vsi_clear(vsi); in i40e_vsi_release()
14303 if (pf->vsi[i] && in i40e_vsi_release()
14304 pf->vsi[i]->uplink_seid == uplink_seid && in i40e_vsi_release()
14305 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_vsi_release()
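The i40e_vsi_release() lines show a strict reverse-order teardown: unregister the netdev, close the VSI, disable IRQs, drop MAC filters under mac_filter_hash_lock, sync, delete the element, free vectors and the netdev, then clear rings and the slot; afterwards the code checks whether the uplink VEB still has other users. A tiny user-space model of the two habits that make this safe, ordering and NULLing-after-free (the struct is a stand-in):

    #include <stdio.h>
    #include <stdlib.h>

    struct netdev { int dummy; };
    struct vsi {
        struct netdev *netdev;
        int netdev_registered;
    };

    /* Teardown mirrors setup in reverse, and pointers are NULLed after
     * free so a second release of the same slot is harmless. */
    static void vsi_release(struct vsi *vsi)
    {
        if (vsi->netdev_registered) {
            vsi->netdev_registered = 0;
            /* unregister_netdev(vsi->netdev) in the real driver */
        }
        /* ... close, disable IRQs, drop filters, delete element ... */
        if (vsi->netdev) {
            free(vsi->netdev);   /* free_netdev() in the real driver */
            vsi->netdev = NULL;
        }
    }

    int main(void)
    {
        struct vsi v = { .netdev = malloc(sizeof(struct netdev)),
                         .netdev_registered = 1 };
        vsi_release(&v);
        vsi_release(&v);         /* safe: pointer already NULL */
        puts("released");
        return 0;
    }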
14333 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) in i40e_vsi_setup_vectors() argument
14336 struct i40e_pf *pf = vsi->back; in i40e_vsi_setup_vectors()
14338 if (vsi->q_vectors[0]) { in i40e_vsi_setup_vectors()
14340 vsi->seid); in i40e_vsi_setup_vectors()
14344 if (vsi->base_vector) { in i40e_vsi_setup_vectors()
14346 vsi->seid, vsi->base_vector); in i40e_vsi_setup_vectors()
14350 ret = i40e_vsi_alloc_q_vectors(vsi); in i40e_vsi_setup_vectors()
14354 vsi->num_q_vectors, vsi->seid, ret); in i40e_vsi_setup_vectors()
14355 vsi->num_q_vectors = 0; in i40e_vsi_setup_vectors()
14364 if (vsi->num_q_vectors) in i40e_vsi_setup_vectors()
14365 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, in i40e_vsi_setup_vectors()
14366 vsi->num_q_vectors, vsi->idx); in i40e_vsi_setup_vectors()
14367 if (vsi->base_vector < 0) { in i40e_vsi_setup_vectors()
14370 vsi->num_q_vectors, vsi->seid, vsi->base_vector); in i40e_vsi_setup_vectors()
14371 i40e_vsi_free_q_vectors(vsi); in i40e_vsi_setup_vectors()
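i40e_vsi_setup_vectors() guards against double initialization (an existing q_vectors[0] or a non-zero base_vector), allocates the q_vectors, then reserves a contiguous lump of MSI-X vectors from pf->irq_pile, freeing the q_vectors again if the lump cannot be had. A self-contained toy version of such a first-fit lump allocator; PILE_SIZE and the per-slot bookkeeping are made up, the real pile also tracks which VSI owns each entry:

    #include <stdio.h>

    #define PILE_SIZE 64

    struct pile { unsigned char used[PILE_SIZE]; };

    /* Grab a contiguous run of slots, or fail as a whole. */
    static int get_lump(struct pile *p, int needed)
    {
        for (int base = 0; base + needed <= PILE_SIZE; base++) {
            int n = 0;
            while (n < needed && !p->used[base + n])
                n++;
            if (n == needed) {
                for (n = 0; n < needed; n++)
                    p->used[base + n] = 1;
                return base;     /* first index of the lump */
            }
        }
        return -1;               /* not enough contiguous room */
    }

    int main(void)
    {
        struct pile irq_pile = { 0 };
        int base_vector = get_lump(&irq_pile, 4);
        if (base_vector < 0) {
            /* The driver frees its freshly allocated q_vectors here. */
            fprintf(stderr, "no room for 4 vectors\n");
            return 1;
        }
        printf("base_vector=%d\n", base_vector);
        return 0;
    }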
14389 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) in i40e_vsi_reinit_setup() argument
14396 if (!vsi) in i40e_vsi_reinit_setup()
14399 pf = vsi->back; in i40e_vsi_reinit_setup()
14401 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); in i40e_vsi_reinit_setup()
14402 i40e_vsi_clear_rings(vsi); in i40e_vsi_reinit_setup()
14404 i40e_vsi_free_arrays(vsi, false); in i40e_vsi_reinit_setup()
14405 i40e_set_num_rings_in_vsi(vsi); in i40e_vsi_reinit_setup()
14406 ret = i40e_vsi_alloc_arrays(vsi, false); in i40e_vsi_reinit_setup()
14410 alloc_queue_pairs = vsi->alloc_queue_pairs * in i40e_vsi_reinit_setup()
14411 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); in i40e_vsi_reinit_setup()
14413 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_reinit_setup()
14417 alloc_queue_pairs, vsi->seid, ret); in i40e_vsi_reinit_setup()
14420 vsi->base_queue = ret; in i40e_vsi_reinit_setup()
14425 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; in i40e_vsi_reinit_setup()
14426 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_vsi_reinit_setup()
14427 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; in i40e_vsi_reinit_setup()
14428 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); in i40e_vsi_reinit_setup()
14429 if (vsi->type == I40E_VSI_MAIN) in i40e_vsi_reinit_setup()
14430 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); in i40e_vsi_reinit_setup()
14433 ret = i40e_alloc_rings(vsi); in i40e_vsi_reinit_setup()
14438 i40e_vsi_map_rings_to_vectors(vsi); in i40e_vsi_reinit_setup()
14439 return vsi; in i40e_vsi_reinit_setup()
14442 i40e_vsi_free_q_vectors(vsi); in i40e_vsi_reinit_setup()
14443 if (vsi->netdev_registered) { in i40e_vsi_reinit_setup()
14444 vsi->netdev_registered = false; in i40e_vsi_reinit_setup()
14445 unregister_netdev(vsi->netdev); in i40e_vsi_reinit_setup()
14446 free_netdev(vsi->netdev); in i40e_vsi_reinit_setup()
14447 vsi->netdev = NULL; in i40e_vsi_reinit_setup()
14449 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_reinit_setup()
14451 i40e_vsi_clear(vsi); in i40e_vsi_reinit_setup()
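i40e_vsi_reinit_setup() returns the VSI's queues to the pile, frees and reallocates its arrays, and re-reserves the lump, doubled when XDP is enabled because each queue pair then needs an extra TX ring; any failure unwinds through freeing vectors, unregistering the netdev, and deleting the firmware element. A compilable sketch of the doubling arithmetic plus a one-step unwind, with simplified types:

    #include <stdio.h>
    #include <stdlib.h>

    struct vsi {
        int alloc_queue_pairs;
        int xdp_enabled;
        int *tx, *rx;
    };

    static int vsi_reinit(struct vsi *vsi)
    {
        /* XDP gets its own TX ring per queue pair, so the lump taken
         * from the queue pile doubles when XDP is enabled. */
        int n = vsi->alloc_queue_pairs * (vsi->xdp_enabled ? 2 : 1);

        vsi->tx = calloc(n, sizeof(*vsi->tx));
        if (!vsi->tx)
            return -1;
        vsi->rx = calloc(n, sizeof(*vsi->rx));
        if (!vsi->rx)
            goto err_rx;
        printf("reserved %d queue pairs\n", n);
        return 0;

    err_rx:
        /* Unwind in reverse order; the real path also unregisters the
         * netdev and deletes the firmware element. */
        free(vsi->tx);
        vsi->tx = NULL;
        return -1;
    }

    int main(void)
    {
        struct vsi v = { .alloc_queue_pairs = 8, .xdp_enabled = 1 };
        if (vsi_reinit(&v))
            return 1;
        free(v.tx);
        free(v.rx);
        return 0;
    }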
14471 struct i40e_vsi *vsi = NULL; in i40e_vsi_setup() local
14500 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { in i40e_vsi_setup()
14501 vsi = pf->vsi[i]; in i40e_vsi_setup()
14505 if (!vsi) { in i40e_vsi_setup()
14511 if (vsi->uplink_seid == pf->mac_seid) in i40e_vsi_setup()
14512 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, in i40e_vsi_setup()
14513 vsi->tc_config.enabled_tc); in i40e_vsi_setup()
14514 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) in i40e_vsi_setup()
14515 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_vsi_setup()
14516 vsi->tc_config.enabled_tc); in i40e_vsi_setup()
14518 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { in i40e_vsi_setup()
14519 dev_info(&vsi->back->pdev->dev, in i40e_vsi_setup()
14534 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_vsi_setup()
14542 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; in i40e_vsi_setup()
14550 vsi = pf->vsi[v_idx]; in i40e_vsi_setup()
14551 if (!vsi) in i40e_vsi_setup()
14553 vsi->type = type; in i40e_vsi_setup()
14554 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB); in i40e_vsi_setup()
14559 vsi->vf_id = param1; in i40e_vsi_setup()
14561 alloc_queue_pairs = vsi->alloc_queue_pairs * in i40e_vsi_setup()
14562 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); in i40e_vsi_setup()
14564 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_setup()
14568 alloc_queue_pairs, vsi->seid, ret); in i40e_vsi_setup()
14571 vsi->base_queue = ret; in i40e_vsi_setup()
14574 vsi->uplink_seid = uplink_seid; in i40e_vsi_setup()
14575 ret = i40e_add_vsi(vsi); in i40e_vsi_setup()
14579 switch (vsi->type) { in i40e_vsi_setup()
14583 ret = i40e_config_netdev(vsi); in i40e_vsi_setup()
14586 ret = i40e_netif_set_realnum_tx_rx_queues(vsi); in i40e_vsi_setup()
14589 ret = register_netdev(vsi->netdev); in i40e_vsi_setup()
14592 vsi->netdev_registered = true; in i40e_vsi_setup()
14593 netif_carrier_off(vsi->netdev); in i40e_vsi_setup()
14596 i40e_dcbnl_setup(vsi); in i40e_vsi_setup()
14601 ret = i40e_vsi_setup_vectors(vsi); in i40e_vsi_setup()
14605 ret = i40e_alloc_rings(vsi); in i40e_vsi_setup()
14610 i40e_vsi_map_rings_to_vectors(vsi); in i40e_vsi_setup()
14612 i40e_vsi_reset_stats(vsi); in i40e_vsi_setup()
14620 (vsi->type == I40E_VSI_VMDQ2)) { in i40e_vsi_setup()
14621 ret = i40e_vsi_config_rss(vsi); in i40e_vsi_setup()
14623 return vsi; in i40e_vsi_setup()
14626 i40e_vsi_free_q_vectors(vsi); in i40e_vsi_setup()
14628 if (vsi->netdev_registered) { in i40e_vsi_setup()
14629 vsi->netdev_registered = false; in i40e_vsi_setup()
14630 unregister_netdev(vsi->netdev); in i40e_vsi_setup()
14631 free_netdev(vsi->netdev); in i40e_vsi_setup()
14632 vsi->netdev = NULL; in i40e_vsi_setup()
14635 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_setup()
14637 i40e_vsi_clear(vsi); in i40e_vsi_setup()
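i40e_vsi_setup()'s tail is a classic kernel goto ladder: err_rings frees the q_vectors, the netdev labels unregister and free the netdev, and err_vsi deletes the element via i40e_aq_delete_element() and clears the slot, each label undoing exactly the steps completed before its jump. A generic, runnable illustration of the idiom; the three mallocs stand in for the vectors/netdev/element stages:

    #include <stdlib.h>

    static int setup(void)
    {
        char *a, *b, *c;

        a = malloc(16);
        if (!a)
            goto err;
        b = malloc(16);
        if (!b)
            goto err_a;
        c = malloc(16);
        if (!c)
            goto err_b;

        free(c); free(b); free(a);   /* success path for the demo */
        return 0;

    err_b:
        free(b);
    err_a:
        free(a);
    err:
        return -1;
    }

    int main(void)
    {
        return setup() ? 1 : 0;
    }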
14769 if (!pf->vsi[i]) in i40e_switch_branch_release()
14771 if (pf->vsi[i]->uplink_seid == branch_seid && in i40e_switch_branch_release()
14772 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_switch_branch_release()
14773 i40e_vsi_release(pf->vsi[i]); in i40e_switch_branch_release()
14813 struct i40e_vsi *vsi = NULL; in i40e_veb_release() local
14821 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { in i40e_veb_release()
14823 vsi = pf->vsi[i]; in i40e_veb_release()
14834 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; in i40e_veb_release()
14836 vsi->uplink_seid = veb->uplink_seid; in i40e_veb_release()
14838 vsi->veb_idx = I40E_NO_VEB; in i40e_veb_release()
14840 vsi->veb_idx = veb->veb_idx; in i40e_veb_release()
14843 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_veb_release()
14844 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; in i40e_veb_release()
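When i40e_veb_release() removes a VEB, the single VSI still hanging off it loses I40E_VSI_FLAG_VEB_OWNER and is re-pointed: to the VEB's own uplink_seid normally, or back to the main VSI's uplink for a floating VEB. A small model of that re-parenting rule; the SEID values are arbitrary:

    #include <stdio.h>

    struct node { int seid; int uplink_seid; };

    /* Re-parent a child when its intermediate node is removed: the
     * child inherits the removed node's uplink (or a fallback when
     * the removed node had none) so the tree stays connected. */
    static void reparent(struct node *child, const struct node *removed,
                         const struct node *fallback)
    {
        child->uplink_seid = removed->uplink_seid ?
                             removed->uplink_seid : fallback->uplink_seid;
    }

    int main(void)
    {
        struct node veb      = { .seid = 160, .uplink_seid = 2 };
        struct node vsi      = { .seid = 390, .uplink_seid = 160 };
        struct node main_vsi = { .seid = 388, .uplink_seid = 2 };

        reparent(&vsi, &veb, &main_vsi);
        printf("vsi %d now uplinks to %d\n", vsi.seid, vsi.uplink_seid);
        return 0;
    }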
14856 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) in i40e_add_veb() argument
14862 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, in i40e_add_veb()
14895 vsi->uplink_seid = veb->seid; in i40e_add_veb()
14896 vsi->veb_idx = veb->idx; in i40e_add_veb()
14897 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; in i40e_add_veb()
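i40e_add_veb() passes the uplink SEID and the downlink VSI's SEID to i40e_aq_add_veb(); once the firmware accepts, the VSI is stamped with the new VEB's seid/idx and gains I40E_VSI_FLAG_VEB_OWNER, which is what i40e_vsi_release() later consults before touching the VEB. A minimal sketch of that ownership stamp; the flag value and types are invented:

    #include <stdio.h>

    #define FLAG_VEB_OWNER 0x1

    struct vsi { unsigned short uplink_seid; int veb_idx; unsigned flags; };

    /* After the firmware accepts the new VEB, record ownership on the
     * VSI beneath it: exactly one VSI owns (and later frees) each VEB. */
    static void adopt_veb(struct vsi *vsi, unsigned short veb_seid,
                          int veb_idx)
    {
        vsi->uplink_seid = veb_seid;
        vsi->veb_idx = veb_idx;
        vsi->flags |= FLAG_VEB_OWNER;
    }

    int main(void)
    {
        struct vsi v = { .uplink_seid = 2 };
        adopt_veb(&v, 160, 0);
        printf("uplink=%hu owner=%d\n", v.uplink_seid,
               !!(v.flags & FLAG_VEB_OWNER));
        return 0;
    }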
14937 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) in i40e_veb_setup()
14971 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); in i40e_veb_setup()
15183 struct i40e_vsi *vsi = NULL; in i40e_setup_pf_switch() local
15194 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); in i40e_setup_pf_switch()
15196 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); in i40e_setup_pf_switch()
15197 if (!vsi) { in i40e_setup_pf_switch()
15205 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; in i40e_setup_pf_switch()
15207 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_setup_pf_switch()
15208 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; in i40e_setup_pf_switch()
15209 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); in i40e_setup_pf_switch()
15211 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); in i40e_setup_pf_switch()
15242 udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev); in i40e_setup_pf_switch()
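In i40e_setup_pf_switch(), the main VSI is either created fresh or re-initialized, and the TC configuration is forced through by caching enabled_tc, zeroing tc_config.enabled_tc, and reapplying, so a would-be no-op cannot short-circuit the reconfiguration. A toy demonstration of why the cached value is cleared first; config_tc() is a stand-in for i40e_vsi_config_tc():

    #include <stdio.h>

    struct cfg { unsigned char enabled_tc; };

    static int config_tc(struct cfg *c, unsigned char tc)
    {
        if (c->enabled_tc == tc)   /* no-op if nothing changed */
            return 0;
        c->enabled_tc = tc;
        return 1;                  /* pretend hardware was touched */
    }

    int main(void)
    {
        struct cfg c = { .enabled_tc = 0x3 };
        unsigned char want = c.enabled_tc;

        /* Clear the cached value first so the reapply is not skipped
         * as a no-op; this mirrors zeroing tc_config.enabled_tc before
         * calling the config routine again. */
        c.enabled_tc = 0;
        printf("reapplied=%d\n", config_tc(&c, want));
        return 0;
    }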
15399 pf->vsi[pf->lan_vsi]->num_queue_pairs); in i40e_print_features()
15584 struct i40e_vsi *vsi; in i40e_init_recovery_mode() local
15613 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_init_recovery_mode()
15615 if (!pf->vsi) { in i40e_init_recovery_mode()
15629 vsi = pf->vsi[v_idx]; in i40e_init_recovery_mode()
15630 if (!vsi) { in i40e_init_recovery_mode()
15634 vsi->alloc_queue_pairs = 1; in i40e_init_recovery_mode()
15635 err = i40e_config_netdev(vsi); in i40e_init_recovery_mode()
15638 err = register_netdev(vsi->netdev); in i40e_init_recovery_mode()
15641 vsi->netdev_registered = true; in i40e_init_recovery_mode()
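i40e_init_recovery_mode(), like i40e_probe() just below, kcallocs pf->vsi as a zeroed table of pointers, then brings up a single minimal VSI with alloc_queue_pairs = 1 and registers its netdev. The sizing detail matters: the element size is sizeof(struct i40e_vsi *), not sizeof(struct i40e_vsi). A user-space equivalent with a stand-in struct:

    #include <stdio.h>
    #include <stdlib.h>

    struct vsi { int alloc_queue_pairs; };

    int main(void)
    {
        int num_alloc_vsi = 4;

        /* calloc() of *pointers*, sized by the pointer type, matching
         * kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), ...);
         * the table starts out all-NULL. */
        struct vsi **tab = calloc(num_alloc_vsi, sizeof(struct vsi *));
        if (!tab)
            return 1;

        tab[0] = malloc(sizeof(*tab[0]));
        if (!tab[0]) {
            free(tab);
            return 1;
        }
        tab[0]->alloc_queue_pairs = 1;  /* recovery mode: one queue pair */
        printf("qp=%d\n", tab[0]->alloc_queue_pairs);
        free(tab[0]);
        free(tab);
        return 0;
    }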
16037 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_probe()
16039 if (!pf->vsi) { in i40e_probe()
16058 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); in i40e_probe()
16062 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { in i40e_probe()
16063 i40e_vsi_open(pf->vsi[i]); in i40e_probe()
16274 kfree(pf->vsi); in i40e_probe()
16343 struct i40e_vsi *vsi = pf->vsi[0]; in i40e_remove() local
16349 unregister_netdev(vsi->netdev); in i40e_remove()
16350 free_netdev(vsi->netdev); in i40e_remove()
16358 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_remove()
16378 if (pf->vsi[i]) { in i40e_remove()
16379 i40e_vsi_close(pf->vsi[i]); in i40e_remove()
16380 i40e_vsi_release(pf->vsi[i]); in i40e_remove()
16381 pf->vsi[i] = NULL; in i40e_remove()
16420 if (pf->vsi[i]) { in i40e_remove()
16422 i40e_vsi_clear_rings(pf->vsi[i]); in i40e_remove()
16423 i40e_vsi_clear(pf->vsi[i]); in i40e_remove()
16424 pf->vsi[i] = NULL; in i40e_remove()
16435 kfree(pf->vsi); in i40e_remove()
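i40e_remove() walks the same table twice in the fragments above: first closing and releasing live VSIs, later clearing rings and slots, always NULLing pf->vsi[i], before finally kfree()ing the table itself. A compact model of that walk-release-NULL-free discipline (stand-in types, with free() in place of the release/clear helpers):

    #include <stdlib.h>

    struct vsi { int id; };

    static void remove_all(struct vsi **tab, int n)
    {
        for (int i = 0; i < n; i++) {
            if (!tab[i])
                continue;
            free(tab[i]);        /* release/clear helpers in the driver */
            tab[i] = NULL;       /* slot NULLed before the table goes */
        }
        free(tab);
    }

    int main(void)
    {
        struct vsi **tab = calloc(3, sizeof(struct vsi *));
        if (!tab)
            return 1;
        tab[1] = malloc(sizeof(*tab[1]));
        remove_all(tab, 3);      /* NULL slots are skipped harmlessly */
        return 0;
    }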
16569 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { in i40e_enable_mc_magic_wake()
16571 pf->vsi[pf->lan_vsi]->netdev->dev_addr); in i40e_enable_mc_magic_wake()
16623 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_shutdown()
16676 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_suspend()