Lines Matching full:pf (i40e driver, i40e_main.c)
35 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
38 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
39 static int i40e_setup_misc_vector(struct i40e_pf *pf);
40 static void i40e_determine_queue_usage(struct i40e_pf *pf);
41 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
42 static void i40e_prep_for_reset(struct i40e_pf *pf);
43 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
45 static int i40e_reset(struct i40e_pf *pf);
46 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
47 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
48 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
49 static bool i40e_check_recovery_mode(struct i40e_pf *pf);
50 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
51 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
53 static int i40e_get_capabilities(struct i40e_pf *pf,
55 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
137 struct i40e_pf *pf = i40e_hw_to_pf(hw); in i40e_hw_to_dev() local
139 return &pf->pdev->dev; in i40e_hw_to_dev()
152 struct i40e_pf *pf = i40e_hw_to_pf(hw); in i40e_allocate_dma_mem() local
155 mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, in i40e_allocate_dma_mem()
170 struct i40e_pf *pf = i40e_hw_to_pf(hw); in i40e_free_dma_mem() local
172 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa); in i40e_free_dma_mem()
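The i40e_allocate_dma_mem()/i40e_free_dma_mem() matches above pair dma_alloc_coherent() with dma_free_coherent() against the PF's PCI device. A minimal sketch of that pairing, assuming a hypothetical my_dma_buf holder rather than the driver's own DMA-mem type:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical holder; the driver tracks va/pa/size in its own struct. */
struct my_dma_buf {
	void *va;
	dma_addr_t pa;
	size_t size;
};

static int my_dma_buf_alloc(struct device *dev, struct my_dma_buf *buf,
			    size_t size)
{
	buf->size = size;
	buf->va = dma_alloc_coherent(dev, size, &buf->pa, GFP_KERNEL);
	return buf->va ? 0 : -ENOMEM;
}

static void my_dma_buf_free(struct device *dev, struct my_dma_buf *buf)
{
	dma_free_coherent(dev, buf->size, buf->va, buf->pa);
	buf->va = NULL;
}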
215 * @pf: board private structure
222 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, in i40e_get_lump() argument
229 dev_info(&pf->pdev->dev, in i40e_get_lump()
238 if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) { in i40e_get_lump()
240 dev_err(&pf->pdev->dev, in i40e_get_lump()
308 * @pf: the pf structure to search for the vsi
311 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) in i40e_find_vsi_from_id() argument
315 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_find_vsi_from_id()
316 if (pf->vsi[i] && (pf->vsi[i]->id == id)) in i40e_find_vsi_from_id()
317 return pf->vsi[i]; in i40e_find_vsi_from_id()
324 * @pf: board private structure
328 void i40e_service_event_schedule(struct i40e_pf *pf) in i40e_service_event_schedule() argument
330 if ((!test_bit(__I40E_DOWN, pf->state) && in i40e_service_event_schedule()
331 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) || in i40e_service_event_schedule()
332 test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_service_event_schedule()
333 queue_work(i40e_wq, &pf->service_task); in i40e_service_event_schedule()
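i40e_service_event_schedule() only queues the service task when the PF is neither down nor waiting on a reset, or when it is in recovery mode. A hedged sketch of that guard-then-queue pattern, with my_pf/my_wq and the MY_* state bits standing in for the driver's types and flags:

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* Hypothetical state bits mirroring the checks above. */
enum { MY_DOWN, MY_RESET_RECOVERY_PENDING, MY_RECOVERY_MODE, MY_STATE_BITS };

struct my_pf {
	DECLARE_BITMAP(state, MY_STATE_BITS);
	struct work_struct service_task;
};

static struct workqueue_struct *my_wq;	/* created elsewhere */

static void my_service_event_schedule(struct my_pf *pf)
{
	/* Only queue deferred work when it can actually make progress. */
	if ((!test_bit(MY_DOWN, pf->state) &&
	     !test_bit(MY_RESET_RECOVERY_PENDING, pf->state)) ||
	    test_bit(MY_RECOVERY_MODE, pf->state))
		queue_work(my_wq, &pf->service_task);
}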
349 struct i40e_pf *pf = vsi->back; in i40e_tx_timeout() local
354 pf->tx_timeout_count++; in i40e_tx_timeout()
367 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20))) in i40e_tx_timeout()
368 pf->tx_timeout_recovery_level = 1; /* reset after some time */ in i40e_tx_timeout()
370 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) in i40e_tx_timeout()
374 if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state)) in i40e_tx_timeout()
380 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_tx_timeout()
381 val = rd32(&pf->hw, in i40e_tx_timeout()
385 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); in i40e_tx_timeout()
393 pf->tx_timeout_last_recovery = jiffies; in i40e_tx_timeout()
395 pf->tx_timeout_recovery_level, txqueue); in i40e_tx_timeout()
397 switch (pf->tx_timeout_recovery_level) { in i40e_tx_timeout()
399 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_tx_timeout()
402 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_tx_timeout()
405 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); in i40e_tx_timeout()
409 set_bit(__I40E_DOWN_REQUESTED, pf->state); in i40e_tx_timeout()
414 i40e_service_event_schedule(pf); in i40e_tx_timeout()
415 pf->tx_timeout_recovery_level++; in i40e_tx_timeout()
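The timeout handler escalates its response on each occurrence: level 1 requests a PF reset, level 2 a core reset, level 3 a global reset, and anything beyond that gives up and requests a down. A standalone sketch of that ladder; the MY_* bit indices are stand-ins for the driver's __I40E_*_REQUESTED state bits:

#include <linux/bitops.h>

enum {
	MY_PF_RESET_REQUESTED,
	MY_CORE_RESET_REQUESTED,
	MY_GLOBAL_RESET_REQUESTED,
	MY_DOWN_REQUESTED,
};

/* Escalate from the least to the most disruptive recovery action. */
static void my_escalate_recovery(unsigned long *state, unsigned int level)
{
	switch (level) {
	case 1:
		set_bit(MY_PF_RESET_REQUESTED, state);
		break;
	case 2:
		set_bit(MY_CORE_RESET_REQUESTED, state);
		break;
	case 3:
		set_bit(MY_GLOBAL_RESET_REQUESTED, state);
		break;
	default:
		set_bit(MY_DOWN_REQUESTED, state);
		break;
	}
}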
549 * i40e_pf_reset_stats - Reset all of the stats for the given PF
550 * @pf: the PF to be reset
552 void i40e_pf_reset_stats(struct i40e_pf *pf) in i40e_pf_reset_stats() argument
556 memset(&pf->stats, 0, sizeof(pf->stats)); in i40e_pf_reset_stats()
557 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets)); in i40e_pf_reset_stats()
558 pf->stat_offsets_loaded = false; in i40e_pf_reset_stats()
561 if (pf->veb[i]) { in i40e_pf_reset_stats()
562 memset(&pf->veb[i]->stats, 0, in i40e_pf_reset_stats()
563 sizeof(pf->veb[i]->stats)); in i40e_pf_reset_stats()
564 memset(&pf->veb[i]->stats_offsets, 0, in i40e_pf_reset_stats()
565 sizeof(pf->veb[i]->stats_offsets)); in i40e_pf_reset_stats()
566 memset(&pf->veb[i]->tc_stats, 0, in i40e_pf_reset_stats()
567 sizeof(pf->veb[i]->tc_stats)); in i40e_pf_reset_stats()
568 memset(&pf->veb[i]->tc_stats_offsets, 0, in i40e_pf_reset_stats()
569 sizeof(pf->veb[i]->tc_stats_offsets)); in i40e_pf_reset_stats()
570 pf->veb[i]->stat_offsets_loaded = false; in i40e_pf_reset_stats()
573 pf->hw_csum_rx_error = 0; in i40e_pf_reset_stats()
723 struct i40e_pf *pf = vsi->back; in i40e_update_eth_stats() local
724 struct i40e_hw *hw = &pf->hw; in i40e_update_eth_stats()
788 struct i40e_pf *pf = veb->pf; in i40e_update_veb_stats() local
789 struct i40e_hw *hw = &pf->hw; in i40e_update_veb_stats()
874 struct i40e_pf *pf = vsi->back; in i40e_update_vsi_stats() local
891 test_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_update_vsi_stats()
999 /* pull in a couple PF stats if this is the main vsi */ in i40e_update_vsi_stats()
1000 if (vsi == pf->vsi[pf->lan_vsi]) { in i40e_update_vsi_stats()
1001 ns->rx_crc_errors = pf->stats.crc_errors; in i40e_update_vsi_stats()
1002 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; in i40e_update_vsi_stats()
1003 ns->rx_length_errors = pf->stats.rx_length_errors; in i40e_update_vsi_stats()
1008 * i40e_update_pf_stats - Update the PF statistics counters.
1009 * @pf: the PF to be updated
1011 static void i40e_update_pf_stats(struct i40e_pf *pf) in i40e_update_pf_stats() argument
1013 struct i40e_hw_port_stats *osd = &pf->stats_offsets; in i40e_update_pf_stats()
1014 struct i40e_hw_port_stats *nsd = &pf->stats; in i40e_update_pf_stats()
1015 struct i40e_hw *hw = &pf->hw; in i40e_update_pf_stats()
1021 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1025 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1028 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1033 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1038 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1043 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1048 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1053 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1058 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1063 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1068 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1072 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1076 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1080 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1085 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1090 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1093 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1096 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1099 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1104 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1108 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1112 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1116 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1121 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1128 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1132 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1136 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1140 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1144 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1148 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1152 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1157 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1161 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1165 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1169 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1173 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1177 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1181 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1185 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1188 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1191 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1194 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1216 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1219 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1222 if (pf->flags & I40E_FLAG_FD_SB_ENABLED && in i40e_update_pf_stats()
1223 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) in i40e_update_pf_stats()
1228 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && in i40e_update_pf_stats()
1229 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_update_pf_stats()
1234 pf->stat_offsets_loaded = true; in i40e_update_pf_stats()
1245 struct i40e_pf *pf = vsi->back; in i40e_update_stats() local
1247 if (vsi == pf->vsi[pf->lan_vsi]) in i40e_update_stats()
1248 i40e_update_pf_stats(pf); in i40e_update_stats()
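Every pf->stat_offsets_loaded match above feeds the same idiom: on the first pass the current hardware counter value is latched as a baseline, and later passes report the (wrap-safe) difference from that baseline, since the free-running counters are never cleared. A hedged sketch of that accumulation, assuming a 48-bit counter width like many of these port counters; my_stat_update() stands in for the driver's stat-update helpers that these lines pass pf->stat_offsets_loaded into:

#include <linux/types.h>

static void my_stat_update(u64 hw_count, bool offset_loaded,
			   u64 *offset, u64 *stat)
{
	if (!offset_loaded)
		*offset = hw_count;	/* first read becomes the baseline */
	if (hw_count >= *offset)
		*stat = hw_count - *offset;
	else
		/* counter wrapped since the baseline was taken */
		*stat = hw_count + (1ULL << 48) - *offset;
}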
1494 struct i40e_pf *pf = vsi->back; in i40e_get_vf_new_vlan() local
1504 !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING)); in i40e_get_vf_new_vlan()
1585 * @vsi: the PF Main VSI - inappropriate for any other VSI
1594 struct i40e_pf *pf = vsi->back; in i40e_rm_default_mac_filter() local
1596 /* Only appropriate for the PF main VSI */ in i40e_rm_default_mac_filter()
1605 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); in i40e_rm_default_mac_filter()
1613 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); in i40e_rm_default_mac_filter()
1813 struct i40e_pf *pf = vsi->back; in i40e_set_mac() local
1814 struct i40e_hw *hw = &pf->hw; in i40e_set_mac()
1820 if (test_bit(__I40E_DOWN, pf->state) || in i40e_set_mac()
1821 test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_set_mac()
1856 i40e_service_event_schedule(pf); in i40e_set_mac()
1870 struct i40e_pf *pf = vsi->back; in i40e_config_rss_aq() local
1871 struct i40e_hw *hw = &pf->hw; in i40e_config_rss_aq()
1879 dev_info(&pf->pdev->dev, in i40e_config_rss_aq()
1891 dev_info(&pf->pdev->dev, in i40e_config_rss_aq()
1907 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_rss() local
1912 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)) in i40e_vsi_config_rss()
1915 vsi->rss_size = min_t(int, pf->alloc_rss_size, in i40e_vsi_config_rss()
1929 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); in i40e_vsi_config_rss()
2040 struct i40e_pf *pf = vsi->back; in i40e_vsi_setup_queue_map() local
2067 else if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_setup_queue_map()
2068 vsi->num_queue_pairs = pf->num_lan_msix; in i40e_vsi_setup_queue_map()
2087 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n"); in i40e_vsi_setup_queue_map()
2092 i40e_pf_get_max_q_per_tc(pf)); in i40e_vsi_setup_queue_map()
2099 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_setup_queue_map()
2100 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix); in i40e_vsi_setup_queue_map()
2111 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | in i40e_vsi_setup_queue_map()
2114 qcount = min_t(int, pf->alloc_rss_size, in i40e_vsi_setup_queue_map()
2484 * @pf: board private structure
2487 * There are different ways of setting promiscuous mode on a PF depending on
2491 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) in i40e_set_promiscuous() argument
2493 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_set_promiscuous()
2494 struct i40e_hw *hw = &pf->hw; in i40e_set_promiscuous()
2498 pf->lan_veb != I40E_NO_VEB && in i40e_set_promiscuous()
2499 !(pf->flags & I40E_FLAG_MFP_ENABLED)) { in i40e_set_promiscuous()
2514 dev_info(&pf->pdev->dev, in i40e_set_promiscuous()
2526 dev_info(&pf->pdev->dev, in i40e_set_promiscuous()
2536 dev_info(&pf->pdev->dev, in i40e_set_promiscuous()
2544 pf->cur_promisc = promisc; in i40e_set_promiscuous()
2566 char vsi_name[16] = "PF"; in i40e_sync_vsi_filters()
2570 struct i40e_pf *pf; in i40e_sync_vsi_filters() local
2585 pf = vsi->back; in i40e_sync_vsi_filters()
2643 else if (pf->vf) in i40e_sync_vsi_filters()
2646 vlan_filters, pf->vf[vsi->vf_id].trusted); in i40e_sync_vsi_filters()
2809 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2817 if (vsi->type == I40E_VSI_SRIOV && pf->vf && in i40e_sync_vsi_filters()
2818 !pf->vf[vsi->vf_id].trusted) { in i40e_sync_vsi_filters()
2843 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2849 dev_info(&pf->pdev->dev, "%s allmulti mode.\n", in i40e_sync_vsi_filters()
2859 aq_ret = i40e_set_promiscuous(pf, cur_promisc); in i40e_sync_vsi_filters()
2863 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2894 * @pf: board private structure
2896 static void i40e_sync_filters_subtask(struct i40e_pf *pf) in i40e_sync_filters_subtask() argument
2900 if (!pf) in i40e_sync_filters_subtask()
2902 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) in i40e_sync_filters_subtask()
2904 if (test_bit(__I40E_VF_DISABLE, pf->state)) { in i40e_sync_filters_subtask()
2905 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); in i40e_sync_filters_subtask()
2909 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_sync_filters_subtask()
2910 if (pf->vsi[v] && in i40e_sync_filters_subtask()
2911 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) && in i40e_sync_filters_subtask()
2912 !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) { in i40e_sync_filters_subtask()
2913 int ret = i40e_sync_vsi_filters(pf->vsi[v]); in i40e_sync_filters_subtask()
2918 pf->state); in i40e_sync_filters_subtask()
2968 struct i40e_pf *pf = vsi->back; in i40e_change_mtu() local
2983 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_change_mtu()
2984 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_change_mtu()
2997 struct i40e_pf *pf = np->vsi->back; in i40e_ioctl() local
3001 return i40e_ptp_get_ts_config(pf, ifr); in i40e_ioctl()
3003 return i40e_ptp_set_ts_config(pf, ifr); in i40e_ioctl()
3839 * @pf: Pointer to the targeted PF
3843 static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf) in i40e_reset_fdir_filter_cnt() argument
3845 pf->fd_tcp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3846 pf->fd_udp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3847 pf->fd_sctp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3848 pf->fd_ip4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3849 pf->fd_tcp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3850 pf->fd_udp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3851 pf->fd_sctp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3852 pf->fd_ip6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3865 struct i40e_pf *pf = vsi->back; in i40e_fdir_filter_restore() local
3868 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) in i40e_fdir_filter_restore()
3872 i40e_reset_fdir_filter_cnt(pf); in i40e_fdir_filter_restore()
3875 &pf->fdir_filter_list, fdir_node) { in i40e_fdir_filter_restore()
3905 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_msix() local
3906 struct i40e_hw *hw = &pf->hw; in i40e_vsi_configure_msix()
3992 * @pf: pointer to private device data structure
3994 static void i40e_enable_misc_int_causes(struct i40e_pf *pf) in i40e_enable_misc_int_causes() argument
3996 struct i40e_hw *hw = &pf->hw; in i40e_enable_misc_int_causes()
4012 if (pf->flags & I40E_FLAG_IWARP_ENABLED) in i40e_enable_misc_int_causes()
4015 if (pf->flags & I40E_FLAG_PTP) in i40e_enable_misc_int_causes()
4036 struct i40e_pf *pf = vsi->back; in i40e_configure_msi_and_legacy() local
4037 struct i40e_hw *hw = &pf->hw; in i40e_configure_msi_and_legacy()
4049 i40e_enable_misc_int_causes(pf); in i40e_configure_msi_and_legacy()
4073 * @pf: board private structure
4075 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) in i40e_irq_dynamic_disable_icr0() argument
4077 struct i40e_hw *hw = &pf->hw; in i40e_irq_dynamic_disable_icr0()
4086 * @pf: board private structure
4088 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) in i40e_irq_dynamic_enable_icr0() argument
4090 struct i40e_hw *hw = &pf->hw; in i40e_irq_dynamic_enable_icr0()
4155 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq_msix() local
4166 irq_num = pf->msix_entries[base + vector].vector; in i40e_vsi_request_irq_msix()
4188 dev_info(&pf->pdev->dev, in i40e_vsi_request_irq_msix()
4214 irq_num = pf->msix_entries[base + vector].vector; in i40e_vsi_request_irq_msix()
4228 struct i40e_pf *pf = vsi->back; in i40e_vsi_disable_irq() local
4229 struct i40e_hw *hw = &pf->hw; in i40e_vsi_disable_irq()
4251 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_vsi_disable_irq()
4258 synchronize_irq(pf->msix_entries[i + base].vector); in i40e_vsi_disable_irq()
4264 synchronize_irq(pf->pdev->irq); in i40e_vsi_disable_irq()
4274 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_irq() local
4277 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_vsi_enable_irq()
4281 i40e_irq_dynamic_enable_icr0(pf); in i40e_vsi_enable_irq()
4284 i40e_flush(&pf->hw); in i40e_vsi_enable_irq()
4290 * @pf: board private structure
4292 static void i40e_free_misc_vector(struct i40e_pf *pf) in i40e_free_misc_vector() argument
4295 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); in i40e_free_misc_vector()
4296 i40e_flush(&pf->hw); in i40e_free_misc_vector()
4298 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) { in i40e_free_misc_vector()
4299 free_irq(pf->msix_entries[0].vector, pf); in i40e_free_misc_vector()
4300 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); in i40e_free_misc_vector()
4315 struct i40e_pf *pf = (struct i40e_pf *)data; in i40e_intr() local
4316 struct i40e_hw *hw = &pf->hw; in i40e_intr()
4331 pf->sw_int_count++; in i40e_intr()
4333 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && in i40e_intr()
4336 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n"); in i40e_intr()
4337 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_intr()
4342 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_intr()
4351 if (!test_bit(__I40E_DOWN, pf->state)) in i40e_intr()
4357 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); in i40e_intr()
4358 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); in i40e_intr()
4363 set_bit(__I40E_MDD_EVENT_PENDING, pf->state); in i40e_intr()
4368 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) { in i40e_intr()
4375 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); in i40e_intr()
4380 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_intr()
4381 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state); in i40e_intr()
4387 pf->corer_count++; in i40e_intr()
4389 pf->globr_count++; in i40e_intr()
4391 pf->empr_count++; in i40e_intr()
4392 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state); in i40e_intr()
4398 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); in i40e_intr()
4399 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", in i40e_intr()
4408 schedule_work(&pf->ptp_extts0_work); in i40e_intr()
4411 i40e_ptp_tx_hwtstamp(pf); in i40e_intr()
4422 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", in i40e_intr()
4427 dev_info(&pf->pdev->dev, "device will be reset\n"); in i40e_intr()
4428 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_intr()
4429 i40e_service_event_schedule(pf); in i40e_intr()
4438 if (!test_bit(__I40E_DOWN, pf->state) || in i40e_intr()
4439 test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_intr()
4440 i40e_service_event_schedule(pf); in i40e_intr()
4441 i40e_irq_dynamic_enable_icr0(pf); in i40e_intr()
4636 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq() local
4639 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_request_irq()
4641 else if (pf->flags & I40E_FLAG_MSI_ENABLED) in i40e_vsi_request_irq()
4642 err = request_irq(pf->pdev->irq, i40e_intr, 0, in i40e_vsi_request_irq()
4643 pf->int_name, pf); in i40e_vsi_request_irq()
4645 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, in i40e_vsi_request_irq()
4646 pf->int_name, pf); in i40e_vsi_request_irq()
4649 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); in i40e_vsi_request_irq()
4666 struct i40e_pf *pf = vsi->back; in i40e_netpoll() local
4673 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_netpoll()
4677 i40e_intr(pf->pdev->irq, netdev); in i40e_netpoll()
4685 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4686 * @pf: the PF being configured
4687 * @pf_q: the PF queue
4690 * This routine will wait for the given Tx queue of the PF to reach the
4695 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) in i40e_pf_txq_wait() argument
4701 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); in i40e_pf_txq_wait()
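The *_wait helpers above poll a queue-enable register until its status bit matches the requested state or a retry budget runs out. A minimal sketch of that poll-with-timeout idiom; my_read_qena(), the status bit, and the retry/delay values are illustrative, not the device's register layout:

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MY_QENA_STAT_BIT	BIT(2)	/* illustrative "queue enabled" status bit */
#define MY_QENA_RETRIES		10
#define MY_QENA_DELAY_US	10

u32 my_read_qena(int pf_q);	/* stand-in for reading the queue-enable register */

static int my_queue_wait(int pf_q, bool enable)
{
	int i;

	for (i = 0; i < MY_QENA_RETRIES; i++) {
		bool enabled = !!(my_read_qena(pf_q) & MY_QENA_STAT_BIT);

		if (enabled == enable)
			return 0;
		usleep_range(MY_QENA_DELAY_US, MY_QENA_DELAY_US * 2);
	}
	return -ETIMEDOUT;
}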
4715 * @pf: the PF structure
4716 * @pf_q: the PF queue to configure
4723 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable) in i40e_control_tx_q() argument
4725 struct i40e_hw *hw = &pf->hw; in i40e_control_tx_q()
4730 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); in i40e_control_tx_q()
4760 * @pf: the PF structure
4761 * @pf_q: the PF queue to configure
4765 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, in i40e_control_wait_tx_q() argument
4770 i40e_control_tx_q(pf, pf_q, enable); in i40e_control_wait_tx_q()
4773 ret = i40e_pf_txq_wait(pf, pf_q, enable); in i40e_control_wait_tx_q()
4775 dev_info(&pf->pdev->dev, in i40e_control_wait_tx_q()
4790 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_tx() local
4795 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_enable_tx()
4804 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_enable_tx()
4814 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4815 * @pf: the PF being configured
4816 * @pf_q: the PF queue
4819 * This routine will wait for the given Rx queue of the PF to reach the
4824 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) in i40e_pf_rxq_wait() argument
4830 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q)); in i40e_pf_rxq_wait()
4844 * @pf: the PF structure
4845 * @pf_q: the PF queue to configure
4852 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable) in i40e_control_rx_q() argument
4854 struct i40e_hw *hw = &pf->hw; in i40e_control_rx_q()
4881 * @pf: the PF structure
4889 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable) in i40e_control_wait_rx_q() argument
4893 i40e_control_rx_q(pf, pf_q, enable); in i40e_control_wait_rx_q()
4896 ret = i40e_pf_rxq_wait(pf, pf_q, enable); in i40e_control_wait_rx_q()
4909 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_rx() local
4914 ret = i40e_control_wait_rx_q(pf, pf_q, true); in i40e_vsi_enable_rx()
4916 dev_info(&pf->pdev->dev, in i40e_vsi_enable_rx()
4951 struct i40e_pf *pf = vsi->back; in i40e_vsi_stop_rings() local
4960 i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false); in i40e_vsi_stop_rings()
4963 err = i40e_control_wait_rx_q(pf, pf_q, false); in i40e_vsi_stop_rings()
4965 dev_info(&pf->pdev->dev, in i40e_vsi_stop_rings()
4973 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0); in i40e_vsi_stop_rings()
4991 struct i40e_pf *pf = vsi->back; in i40e_vsi_stop_rings_no_wait() local
4996 i40e_control_tx_q(pf, pf_q, false); in i40e_vsi_stop_rings_no_wait()
4997 i40e_control_rx_q(pf, pf_q, false); in i40e_vsi_stop_rings_no_wait()
5007 struct i40e_pf *pf = vsi->back; in i40e_vsi_free_irq() local
5008 struct i40e_hw *hw = &pf->hw; in i40e_vsi_free_irq()
5013 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_vsi_free_irq()
5026 irq_num = pf->msix_entries[vector].vector; in i40e_vsi_free_irq()
5086 free_irq(pf->pdev->irq, pf); in i40e_vsi_free_irq()
5170 * @pf: board private structure
5172 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) in i40e_reset_interrupt_capability() argument
5175 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_reset_interrupt_capability()
5176 pci_disable_msix(pf->pdev); in i40e_reset_interrupt_capability()
5177 kfree(pf->msix_entries); in i40e_reset_interrupt_capability()
5178 pf->msix_entries = NULL; in i40e_reset_interrupt_capability()
5179 kfree(pf->irq_pile); in i40e_reset_interrupt_capability()
5180 pf->irq_pile = NULL; in i40e_reset_interrupt_capability()
5181 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { in i40e_reset_interrupt_capability()
5182 pci_disable_msi(pf->pdev); in i40e_reset_interrupt_capability()
5184 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); in i40e_reset_interrupt_capability()
5189 * @pf: board private structure
5194 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) in i40e_clear_interrupt_scheme() argument
5198 if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) in i40e_clear_interrupt_scheme()
5199 i40e_free_misc_vector(pf); in i40e_clear_interrupt_scheme()
5201 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, in i40e_clear_interrupt_scheme()
5204 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); in i40e_clear_interrupt_scheme()
5205 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_clear_interrupt_scheme()
5206 if (pf->vsi[i]) in i40e_clear_interrupt_scheme()
5207 i40e_vsi_free_q_vectors(pf->vsi[i]); in i40e_clear_interrupt_scheme()
5208 i40e_reset_interrupt_capability(pf); in i40e_clear_interrupt_scheme()
5255 struct i40e_pf *pf = vsi->back; in i40e_vsi_close() local
5262 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_vsi_close()
5263 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_vsi_close()
5264 set_bit(__I40E_CLIENT_RESET, pf->state); in i40e_vsi_close()
5299 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
5300 * @pf: the PF
5302 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) in i40e_pf_quiesce_all_vsi() argument
5306 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_quiesce_all_vsi()
5307 if (pf->vsi[v]) in i40e_pf_quiesce_all_vsi()
5308 i40e_quiesce_vsi(pf->vsi[v]); in i40e_pf_quiesce_all_vsi()
5313 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
5314 * @pf: the PF
5316 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) in i40e_pf_unquiesce_all_vsi() argument
5320 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_unquiesce_all_vsi()
5321 if (pf->vsi[v]) in i40e_pf_unquiesce_all_vsi()
5322 i40e_unquiesce_vsi(pf->vsi[v]); in i40e_pf_unquiesce_all_vsi()
5334 struct i40e_pf *pf = vsi->back; in i40e_vsi_wait_queues_disabled() local
5340 ret = i40e_pf_txq_wait(pf, pf_q, false); in i40e_vsi_wait_queues_disabled()
5342 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5352 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs, in i40e_vsi_wait_queues_disabled()
5355 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5362 ret = i40e_pf_rxq_wait(pf, pf_q, false); in i40e_vsi_wait_queues_disabled()
5364 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5376 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5377 * @pf: the PF
5380 * VSIs that are managed by this PF.
5382 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) in i40e_pf_wait_queues_disabled() argument
5386 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_wait_queues_disabled()
5387 if (pf->vsi[v]) { in i40e_pf_wait_queues_disabled()
5388 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); in i40e_pf_wait_queues_disabled()
5401 * @pf: pointer to PF
5403 * Get TC map for ISCSI PF type that will include iSCSI TC
5406 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) in i40e_get_iscsi_tc_map() argument
5409 struct i40e_hw *hw = &pf->hw; in i40e_get_iscsi_tc_map()
5491 * @pf: PF being queried
5496 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf) in i40e_mqprio_get_enabled_tc() argument
5498 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_mqprio_get_enabled_tc()
5508 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5509 * @pf: PF being queried
5511 * Return number of traffic classes enabled for the given PF
5513 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) in i40e_pf_get_num_tc() argument
5515 struct i40e_hw *hw = &pf->hw; in i40e_pf_get_num_tc()
5520 if (i40e_is_tc_mqprio_enabled(pf)) in i40e_pf_get_num_tc()
5521 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; in i40e_pf_get_num_tc()
5524 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) in i40e_pf_get_num_tc()
5528 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) in i40e_pf_get_num_tc()
5531 /* MFP mode return count of enabled TCs for this PF */ in i40e_pf_get_num_tc()
5532 if (pf->hw.func_caps.iscsi) in i40e_pf_get_num_tc()
5533 enabled_tc = i40e_get_iscsi_tc_map(pf); in i40e_pf_get_num_tc()
5546 * @pf: PF being queried
5548 * Return a bitmap for enabled traffic classes for this PF.
5550 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) in i40e_pf_get_tc_map() argument
5552 if (i40e_is_tc_mqprio_enabled(pf)) in i40e_pf_get_tc_map()
5553 return i40e_mqprio_get_enabled_tc(pf); in i40e_pf_get_tc_map()
5555 /* If neither MQPRIO nor DCB is enabled for this PF then just return in i40e_pf_get_tc_map()
5558 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) in i40e_pf_get_tc_map()
5561 /* SFP mode we want PF to be enabled for all TCs */ in i40e_pf_get_tc_map()
5562 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) in i40e_pf_get_tc_map()
5563 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); in i40e_pf_get_tc_map()
5565 /* MFP enabled and iSCSI PF type */ in i40e_pf_get_tc_map()
5566 if (pf->hw.func_caps.iscsi) in i40e_pf_get_tc_map()
5567 return i40e_get_iscsi_tc_map(pf); in i40e_pf_get_tc_map()
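The TC helpers above hand back either a count of enabled traffic classes or a bitmap of them, and callers convert between the two forms. A small sketch of that conversion, assuming the 8-TC maximum this hardware family uses and the single-TC fallback taken when DCB is not enabled:

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_MAX_TC		8
#define MY_DEFAULT_TC_MAP	BIT(0)	/* only TC0 when DCB is disabled */

/* Number of traffic classes enabled in an 8-bit TC bitmap. */
static u8 my_tc_count(u8 tc_map)
{
	return hweight8(tc_map);
}

/* Bitmap with the lowest 'num_tc' traffic classes enabled. */
static u8 my_tc_map(u8 num_tc)
{
	if (!num_tc || num_tc > MY_MAX_TC)
		return MY_DEFAULT_TC_MAP;
	return (u8)(BIT(num_tc) - 1);
}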
5582 struct i40e_pf *pf = vsi->back; in i40e_vsi_get_bw_info() local
5583 struct i40e_hw *hw = &pf->hw; in i40e_vsi_get_bw_info()
5591 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5592 "couldn't get PF vsi bw config, err %pe aq_err %s\n", in i40e_vsi_get_bw_info()
5594 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_vsi_get_bw_info()
5602 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5603 "couldn't get PF vsi ets bw config, err %pe aq_err %s\n", in i40e_vsi_get_bw_info()
5605 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_vsi_get_bw_info()
5610 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5644 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_bw_alloc() local
5649 if (i40e_is_tc_mqprio_enabled(pf)) in i40e_vsi_configure_bw_alloc()
5651 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_vsi_configure_bw_alloc()
5654 dev_info(&pf->pdev->dev, in i40e_vsi_configure_bw_alloc()
5664 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); in i40e_vsi_configure_bw_alloc()
5666 dev_info(&pf->pdev->dev, in i40e_vsi_configure_bw_alloc()
5668 pf->hw.aq.asq_last_status); in i40e_vsi_configure_bw_alloc()
5687 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_netdev_tc() local
5688 struct i40e_hw *hw = &pf->hw; in i40e_vsi_config_netdev_tc()
5721 if (i40e_is_tc_mqprio_enabled(pf)) in i40e_vsi_config_netdev_tc()
5761 struct i40e_pf *pf; in i40e_update_adq_vsi_queues() local
5767 pf = vsi->back; in i40e_update_adq_vsi_queues()
5768 hw = &pf->hw; in i40e_update_adq_vsi_queues()
5781 vsi->rss_size = min_t(int, pf->alloc_rss_size, in i40e_update_adq_vsi_queues()
5785 dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n"); in i40e_update_adq_vsi_queues()
5793 dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n", in i40e_update_adq_vsi_queues()
5821 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_tc() local
5822 struct i40e_hw *hw = &pf->hw; in i40e_vsi_config_tc()
5842 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5848 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5861 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5869 dev_err(&pf->pdev->dev, in i40e_vsi_config_tc()
5882 if (i40e_is_tc_mqprio_enabled(pf)) { in i40e_vsi_config_tc()
5915 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5928 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5948 struct i40e_pf *pf = vsi->back; in i40e_get_link_speed() local
5950 switch (pf->hw.phy.link_info.link_speed) { in i40e_get_link_speed()
5996 struct i40e_pf *pf = vsi->back; in i40e_set_bw_limit() local
6003 dev_err(&pf->pdev->dev, in i40e_set_bw_limit()
6009 dev_warn(&pf->pdev->dev, in i40e_set_bw_limit()
6017 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits, in i40e_set_bw_limit()
6020 dev_err(&pf->pdev->dev, in i40e_set_bw_limit()
6023 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_set_bw_limit()
6038 struct i40e_pf *pf = vsi->back; in i40e_remove_queue_channels() local
6082 &pf->cloud_filter_list, cloud_node) { in i40e_remove_queue_channels()
6094 last_aq_status = pf->hw.aq.asq_last_status; in i40e_remove_queue_channels()
6096 dev_info(&pf->pdev->dev, in i40e_remove_queue_channels()
6099 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_remove_queue_channels()
6139 * @pf: ptr to PF device
6148 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues, in i40e_validate_num_queues() argument
6159 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
6165 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
6180 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
6200 struct i40e_pf *pf = vsi->back; in i40e_vsi_reconfig_rss() local
6202 struct i40e_hw *hw = &pf->hw; in i40e_vsi_reconfig_rss()
6219 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size); in i40e_vsi_reconfig_rss()
6231 dev_info(&pf->pdev->dev, in i40e_vsi_reconfig_rss()
6250 * @pf: ptr to PF device
6256 static void i40e_channel_setup_queue_map(struct i40e_pf *pf, in i40e_channel_setup_queue_map() argument
6267 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix); in i40e_channel_setup_queue_map()
6289 * @pf: ptr to PF device
6295 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, in i40e_add_channel() argument
6298 struct i40e_hw *hw = &pf->hw; in i40e_add_channel()
6304 dev_info(&pf->pdev->dev, in i40e_add_channel()
6317 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) { in i40e_add_channel()
6325 i40e_channel_setup_queue_map(pf, &ctxt, ch); in i40e_add_channel()
6330 dev_info(&pf->pdev->dev, in i40e_add_channel()
6333 i40e_aq_str(&pf->hw, in i40e_add_channel()
6334 pf->hw.aq.asq_last_status)); in i40e_add_channel()
6388 * @pf: ptr to PF device
6395 static int i40e_channel_config_tx_ring(struct i40e_pf *pf, in i40e_channel_config_tx_ring() argument
6440 * @pf: ptr to PF device
6449 static inline int i40e_setup_hw_channel(struct i40e_pf *pf, in i40e_setup_hw_channel() argument
6461 ret = i40e_add_channel(pf, uplink_seid, ch); in i40e_setup_hw_channel()
6463 dev_info(&pf->pdev->dev, in i40e_setup_hw_channel()
6473 ret = i40e_channel_config_tx_ring(pf, vsi, ch); in i40e_setup_hw_channel()
6475 dev_info(&pf->pdev->dev, in i40e_setup_hw_channel()
6483 dev_dbg(&pf->pdev->dev, in i40e_setup_hw_channel()
6484 …"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base… in i40e_setup_hw_channel()
6493 * @pf: ptr to PF device
6500 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, in i40e_setup_channel() argument
6510 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n", in i40e_setup_channel()
6516 seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_setup_channel()
6519 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type); in i40e_setup_channel()
6521 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n"); in i40e_setup_channel()
6530 * @vsi: ptr to VSI which has PF backing
6538 struct i40e_pf *pf = vsi->back; in i40e_validate_and_set_switch_mode() local
6539 struct i40e_hw *hw = &pf->hw; in i40e_validate_and_set_switch_mode()
6542 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities); in i40e_validate_and_set_switch_mode()
6555 dev_err(&pf->pdev->dev, in i40e_validate_and_set_switch_mode()
6572 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags, in i40e_validate_and_set_switch_mode()
6573 pf->last_sw_conf_valid_flags, in i40e_validate_and_set_switch_mode()
6576 dev_err(&pf->pdev->dev, in i40e_validate_and_set_switch_mode()
6596 struct i40e_pf *pf = vsi->back; in i40e_create_queue_channel() local
6604 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n", in i40e_create_queue_channel()
6610 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi, in i40e_create_queue_channel()
6613 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n", in i40e_create_queue_channel()
6622 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { in i40e_create_queue_channel()
6623 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; in i40e_create_queue_channel()
6626 if (i40e_is_tc_mqprio_enabled(pf)) in i40e_create_queue_channel()
6627 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); in i40e_create_queue_channel()
6629 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); in i40e_create_queue_channel()
6640 dev_dbg(&pf->pdev->dev, in i40e_create_queue_channel()
6650 dev_info(&pf->pdev->dev, in i40e_create_queue_channel()
6657 if (!i40e_setup_channel(pf, vsi, ch)) { in i40e_create_queue_channel()
6658 dev_info(&pf->pdev->dev, "Failed to setup channel\n"); in i40e_create_queue_channel()
6662 dev_info(&pf->pdev->dev, in i40e_create_queue_channel()
6674 dev_dbg(&pf->pdev->dev, in i40e_create_queue_channel()
6757 struct i40e_pf *pf = veb->pf; in i40e_veb_config_tc() local
6774 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, in i40e_veb_config_tc()
6777 dev_info(&pf->pdev->dev, in i40e_veb_config_tc()
6780 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_veb_config_tc()
6787 dev_info(&pf->pdev->dev, in i40e_veb_config_tc()
6790 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_veb_config_tc()
6800 * @pf: PF struct
6802 * Reconfigure VEB/VSIs on a given PF; it is assumed that
6806 static void i40e_dcb_reconfigure(struct i40e_pf *pf) in i40e_dcb_reconfigure() argument
6812 /* Enable the TCs available on PF to all VEBs */ in i40e_dcb_reconfigure()
6813 tc_map = i40e_pf_get_tc_map(pf); in i40e_dcb_reconfigure()
6818 if (!pf->veb[v]) in i40e_dcb_reconfigure()
6820 ret = i40e_veb_config_tc(pf->veb[v], tc_map); in i40e_dcb_reconfigure()
6822 dev_info(&pf->pdev->dev, in i40e_dcb_reconfigure()
6824 pf->veb[v]->seid); in i40e_dcb_reconfigure()
6830 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_dcb_reconfigure()
6831 if (!pf->vsi[v]) in i40e_dcb_reconfigure()
6837 if (v == pf->lan_vsi) in i40e_dcb_reconfigure()
6838 tc_map = i40e_pf_get_tc_map(pf); in i40e_dcb_reconfigure()
6842 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); in i40e_dcb_reconfigure()
6844 dev_info(&pf->pdev->dev, in i40e_dcb_reconfigure()
6846 pf->vsi[v]->seid); in i40e_dcb_reconfigure()
6850 i40e_vsi_map_rings_to_vectors(pf->vsi[v]); in i40e_dcb_reconfigure()
6851 if (pf->vsi[v]->netdev) in i40e_dcb_reconfigure()
6852 i40e_dcbnl_set_all(pf->vsi[v]); in i40e_dcb_reconfigure()
6859 * @pf: PF struct
6861 * Resume a port's Tx and issue a PF reset in case of failure to
6864 static int i40e_resume_port_tx(struct i40e_pf *pf) in i40e_resume_port_tx() argument
6866 struct i40e_hw *hw = &pf->hw; in i40e_resume_port_tx()
6871 dev_info(&pf->pdev->dev, in i40e_resume_port_tx()
6874 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_resume_port_tx()
6875 /* Schedule PF reset to recover */ in i40e_resume_port_tx()
6876 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_resume_port_tx()
6877 i40e_service_event_schedule(pf); in i40e_resume_port_tx()
6885 * @pf: PF struct
6887 * Suspend a port's Tx and issue a PF reset in case of failure.
6889 static int i40e_suspend_port_tx(struct i40e_pf *pf) in i40e_suspend_port_tx() argument
6891 struct i40e_hw *hw = &pf->hw; in i40e_suspend_port_tx()
6894 ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL); in i40e_suspend_port_tx()
6896 dev_info(&pf->pdev->dev, in i40e_suspend_port_tx()
6899 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_suspend_port_tx()
6900 /* Schedule PF reset to recover */ in i40e_suspend_port_tx()
6901 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_suspend_port_tx()
6902 i40e_service_event_schedule(pf); in i40e_suspend_port_tx()
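i40e_suspend_port_tx() and i40e_resume_port_tx() share one error policy: if the admin-queue call fails, log it, flag a PF reset, and let the service task carry out the recovery. A hedged sketch of that policy; my_aq_port_tx(), my_schedule_service_task() and the state bit are illustrative stand-ins:

#include <linux/bitops.h>

enum { MY_PF_RESET_REQUESTED };

int my_aq_port_tx(bool resume);		/* stand-in for the AQ call */
void my_schedule_service_task(void);	/* stand-in for the service-task kick */

static int my_port_tx_control(unsigned long *state, bool resume)
{
	int ret = my_aq_port_tx(resume);

	if (ret) {
		/* Ask for a PF reset to recover from the failed AQ call. */
		set_bit(MY_PF_RESET_REQUESTED, state);
		my_schedule_service_task();
	}
	return ret;
}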
6910 * @pf: PF being configured
6914 * given PF. Uses "Set LLDP MIB" AQC to program the hardware.
6916 static int i40e_hw_set_dcb_config(struct i40e_pf *pf, in i40e_hw_set_dcb_config() argument
6919 struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config; in i40e_hw_set_dcb_config()
6924 dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n"); in i40e_hw_set_dcb_config()
6929 i40e_pf_quiesce_all_vsi(pf); in i40e_hw_set_dcb_config()
6934 ret = i40e_set_dcb_config(&pf->hw); in i40e_hw_set_dcb_config()
6936 dev_info(&pf->pdev->dev, in i40e_hw_set_dcb_config()
6939 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_hw_set_dcb_config()
6944 i40e_dcb_reconfigure(pf); in i40e_hw_set_dcb_config()
6947 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) { in i40e_hw_set_dcb_config()
6949 ret = i40e_resume_port_tx(pf); in i40e_hw_set_dcb_config()
6953 i40e_pf_unquiesce_all_vsi(pf); in i40e_hw_set_dcb_config()
6961 * @pf: PF being configured
6965 * given PF
6967 int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) in i40e_hw_dcb_config() argument
6975 struct i40e_hw *hw = &pf->hw; in i40e_hw_dcb_config()
6984 dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n"); in i40e_hw_dcb_config()
7026 need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg); in i40e_hw_dcb_config()
7034 pf->flags |= I40E_FLAG_DCB_ENABLED; in i40e_hw_dcb_config()
7036 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_hw_dcb_config()
7038 set_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_hw_dcb_config()
7040 i40e_pf_quiesce_all_vsi(pf); in i40e_hw_dcb_config()
7041 ret = i40e_suspend_port_tx(pf); in i40e_hw_dcb_config()
7050 (hw, pf->mac_seid, &ets_data, in i40e_hw_dcb_config()
7053 dev_info(&pf->pdev->dev, in i40e_hw_dcb_config()
7056 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_hw_dcb_config()
7076 mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu; in i40e_hw_dcb_config()
7083 i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg); in i40e_hw_dcb_config()
7086 pf->pb_cfg = pb_cfg; in i40e_hw_dcb_config()
7089 ret = i40e_aq_dcb_updated(&pf->hw, NULL); in i40e_hw_dcb_config()
7091 dev_info(&pf->pdev->dev, in i40e_hw_dcb_config()
7094 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_hw_dcb_config()
7102 i40e_dcb_reconfigure(pf); in i40e_hw_dcb_config()
7106 ret = i40e_resume_port_tx(pf); in i40e_hw_dcb_config()
7108 clear_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_hw_dcb_config()
7113 /* Wait for the PF's queues to be disabled */ in i40e_hw_dcb_config()
7114 ret = i40e_pf_wait_queues_disabled(pf); in i40e_hw_dcb_config()
7116 /* Schedule PF reset to recover */ in i40e_hw_dcb_config()
7117 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_hw_dcb_config()
7118 i40e_service_event_schedule(pf); in i40e_hw_dcb_config()
7121 i40e_pf_unquiesce_all_vsi(pf); in i40e_hw_dcb_config()
7122 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_hw_dcb_config()
7123 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_hw_dcb_config()
7126 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) in i40e_hw_dcb_config()
7127 ret = i40e_hw_set_dcb_config(pf, new_cfg); in i40e_hw_dcb_config()
7136 * @pf: PF being queried
7140 int i40e_dcb_sw_default_config(struct i40e_pf *pf) in i40e_dcb_sw_default_config() argument
7142 struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config; in i40e_dcb_sw_default_config()
7144 struct i40e_hw *hw = &pf->hw; in i40e_dcb_sw_default_config()
7147 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) { in i40e_dcb_sw_default_config()
7149 memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config)); in i40e_dcb_sw_default_config()
7150 pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING; in i40e_dcb_sw_default_config()
7151 pf->tmp_cfg.etscfg.maxtcs = 0; in i40e_dcb_sw_default_config()
7152 pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW; in i40e_dcb_sw_default_config()
7153 pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS; in i40e_dcb_sw_default_config()
7154 pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING; in i40e_dcb_sw_default_config()
7155 pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; in i40e_dcb_sw_default_config()
7157 pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS; in i40e_dcb_sw_default_config()
7158 pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE; in i40e_dcb_sw_default_config()
7159 pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO; in i40e_dcb_sw_default_config()
7160 pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE; in i40e_dcb_sw_default_config()
7162 return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg); in i40e_dcb_sw_default_config()
7172 (hw, pf->mac_seid, &ets_data, in i40e_dcb_sw_default_config()
7175 dev_info(&pf->pdev->dev, in i40e_dcb_sw_default_config()
7178 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_dcb_sw_default_config()
7195 * @pf: PF being configured
7200 static int i40e_init_pf_dcb(struct i40e_pf *pf) in i40e_init_pf_dcb() argument
7202 struct i40e_hw *hw = &pf->hw; in i40e_init_pf_dcb()
7208 if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) { in i40e_init_pf_dcb()
7209 dev_info(&pf->pdev->dev, "DCB is not supported.\n"); in i40e_init_pf_dcb()
7213 if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) { in i40e_init_pf_dcb()
7214 dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n"); in i40e_init_pf_dcb()
7215 err = i40e_dcb_sw_default_config(pf); in i40e_init_pf_dcb()
7217 dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n"); in i40e_init_pf_dcb()
7220 dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n"); in i40e_init_pf_dcb()
7221 pf->dcbx_cap = DCB_CAP_DCBX_HOST | in i40e_init_pf_dcb()
7224 pf->flags |= I40E_FLAG_DCB_CAPABLE; in i40e_init_pf_dcb()
7225 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_init_pf_dcb()
7233 dev_info(&pf->pdev->dev, in i40e_init_pf_dcb()
7234 "DCBX offload is not supported or is disabled for this PF.\n"); in i40e_init_pf_dcb()
7237 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | in i40e_init_pf_dcb()
7240 pf->flags |= I40E_FLAG_DCB_CAPABLE; in i40e_init_pf_dcb()
7245 pf->flags |= I40E_FLAG_DCB_ENABLED; in i40e_init_pf_dcb()
7247 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_init_pf_dcb()
7248 dev_dbg(&pf->pdev->dev, in i40e_init_pf_dcb()
7249 "DCBX offload is supported for this PF.\n"); in i40e_init_pf_dcb()
7251 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) { in i40e_init_pf_dcb()
7252 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n"); in i40e_init_pf_dcb()
7253 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP; in i40e_init_pf_dcb()
7255 dev_info(&pf->pdev->dev, in i40e_init_pf_dcb()
7258 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_init_pf_dcb()
7274 struct i40e_pf *pf = vsi->back; in i40e_print_link_message() local
7282 new_speed = pf->hw.phy.link_info.link_speed; in i40e_print_link_message()
7298 if (pf->hw.func_caps.npar_enable && in i40e_print_link_message()
7299 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || in i40e_print_link_message()
7300 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) in i40e_print_link_message()
7304 switch (pf->hw.phy.link_info.link_speed) { in i40e_print_link_message()
7333 switch (pf->hw.fc.current_mode) { in i40e_print_link_message()
7348 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { in i40e_print_link_message()
7353 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) in i40e_print_link_message()
7356 if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
7359 else if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
7377 } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) { in i40e_print_link_message()
7382 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) in i40e_print_link_message()
7385 if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
7389 if (pf->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
7410 struct i40e_pf *pf = vsi->back; in i40e_up_complete() local
7413 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_up_complete()
7427 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && in i40e_up_complete()
7437 pf->fd_add_err = 0; in i40e_up_complete()
7438 pf->fd_atr_cnt = 0; in i40e_up_complete()
7445 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_up_complete()
7446 i40e_service_event_schedule(pf); in i40e_up_complete()
7460 struct i40e_pf *pf = vsi->back; in i40e_vsi_reinit_locked() local
7462 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_vsi_reinit_locked()
7467 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_vsi_reinit_locked()
7472 * @pf: board private structure
7475 static int i40e_force_link_state(struct i40e_pf *pf, bool is_up) in i40e_force_link_state() argument
7480 struct i40e_hw *hw = &pf->hw; in i40e_force_link_state()
7494 dev_err(&pf->pdev->dev, in i40e_force_link_state()
7506 dev_err(&pf->pdev->dev, in i40e_force_link_state()
7517 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) in i40e_force_link_state()
7533 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) { in i40e_force_link_state()
7551 dev_err(&pf->pdev->dev, in i40e_force_link_state()
7554 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_force_link_state()
7839 struct i40e_pf *pf = vsi->back; in i40e_fwd_ring_up() local
7840 struct i40e_hw *hw = &pf->hw; in i40e_fwd_ring_up()
7892 dev_info(&pf->pdev->dev, in i40e_fwd_ring_up()
7912 struct i40e_pf *pf = vsi->back; in i40e_setup_macvlans() local
7913 struct i40e_hw *hw = &pf->hw; in i40e_setup_macvlans()
7949 dev_info(&pf->pdev->dev, in i40e_setup_macvlans()
7965 dev_info(&pf->pdev->dev, in i40e_setup_macvlans()
7985 if (!i40e_setup_channel(pf, vsi, ch)) { in i40e_setup_macvlans()
7999 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n"); in i40e_setup_macvlans()
8015 struct i40e_pf *pf = vsi->back; in i40e_fwd_add() local
8019 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_fwd_add()
8023 if (i40e_is_tc_mqprio_enabled(pf)) { in i40e_fwd_add()
8027 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) { in i40e_fwd_add()
8039 /* reserve bit 0 for the pf device */ in i40e_fwd_add()
8046 vectors = pf->num_lan_msix; in i40e_fwd_add()
8048 /* allocate 4 Qs per macvlan and 32 Qs to the PF*/ in i40e_fwd_add()
8052 /* allocate 2 Qs per macvlan and 16 Qs to the PF*/ in i40e_fwd_add()
8056 /* allocate 1 Q per macvlan and 16 Qs to the PF*/ in i40e_fwd_add()
8060 /* allocate 1 Q per macvlan and 8 Qs to the PF */ in i40e_fwd_add()
8064 /* allocate 1 Q per macvlan and 1 Q to the PF */ in i40e_fwd_add()
8123 struct i40e_pf *pf = vsi->back; in i40e_del_all_macvlans() local
8124 struct i40e_hw *hw = &pf->hw; in i40e_del_all_macvlans()
8160 struct i40e_pf *pf = vsi->back; in i40e_fwd_del() local
8161 struct i40e_hw *hw = &pf->hw; in i40e_fwd_del()
8181 dev_info(&pf->pdev->dev, in i40e_fwd_del()
8201 struct i40e_pf *pf = vsi->back; in i40e_setup_tc() local
8214 pf->flags &= ~I40E_FLAG_TC_MQPRIO; in i40e_setup_tc()
8220 if (pf->flags & I40E_FLAG_MFP_ENABLED) { in i40e_setup_tc()
8227 pf->flags &= ~I40E_FLAG_TC_MQPRIO; in i40e_setup_tc()
8230 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_setup_tc()
8237 if (num_tc > i40e_pf_get_num_tc(pf)) { in i40e_setup_tc()
8244 if (pf->flags & I40E_FLAG_DCB_ENABLED) { in i40e_setup_tc()
8249 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_setup_tc()
8256 pf->flags |= I40E_FLAG_TC_MQPRIO; in i40e_setup_tc()
8257 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_setup_tc()
8276 if (!hw && !i40e_is_tc_mqprio_enabled(pf)) in i40e_setup_tc()
8300 if (i40e_is_tc_mqprio_enabled(pf)) { in i40e_setup_tc()
8398 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter() local
8437 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid, in i40e_add_del_cloud_filter()
8440 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid, in i40e_add_del_cloud_filter()
8443 dev_dbg(&pf->pdev->dev, in i40e_add_del_cloud_filter()
8446 pf->hw.aq.asq_last_status); in i40e_add_del_cloud_filter()
8448 dev_info(&pf->pdev->dev, in i40e_add_del_cloud_filter()
8468 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter_big_buf() local
8526 dev_err(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8539 dev_err(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8545 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid, in i40e_add_del_cloud_filter_big_buf()
8548 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid, in i40e_add_del_cloud_filter_big_buf()
8553 dev_dbg(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8555 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status); in i40e_add_del_cloud_filter_big_buf()
8557 dev_info(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8578 struct i40e_pf *pf = vsi->back; in i40e_parse_cls_flower() local
8590 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%llx\n", in i40e_parse_cls_flower()
8630 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", in i40e_parse_cls_flower()
8640 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", in i40e_parse_cls_flower()
8658 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", in i40e_parse_cls_flower()
8682 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", in i40e_parse_cls_flower()
8692 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n", in i40e_parse_cls_flower()
8699 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); in i40e_parse_cls_flower()
8716 dev_err(&pf->pdev->dev, in i40e_parse_cls_flower()
8738 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", in i40e_parse_cls_flower()
8748 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", in i40e_parse_cls_flower()
8762 dev_err(&pf->pdev->dev, in i40e_parse_cls_flower()
8817 struct i40e_pf *pf = vsi->back; in i40e_configure_clsflower() local
8826 dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination"); in i40e_configure_clsflower()
8830 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_configure_clsflower()
8831 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) in i40e_configure_clsflower()
8834 if (pf->fdir_pf_active_filters || in i40e_configure_clsflower()
8835 (!hlist_empty(&pf->fdir_filter_list))) { in i40e_configure_clsflower()
8869 dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n", in i40e_configure_clsflower()
8877 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list); in i40e_configure_clsflower()
8879 pf->num_cloud_filters++; in i40e_configure_clsflower()
8916 struct i40e_pf *pf = vsi->back; in i40e_delete_clsflower() local
8933 dev_err(&pf->pdev->dev, in i40e_delete_clsflower()
8936 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status); in i40e_delete_clsflower()
8939 pf->num_cloud_filters--; in i40e_delete_clsflower()
8940 if (!pf->num_cloud_filters) in i40e_delete_clsflower()
8941 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && in i40e_delete_clsflower()
8942 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { in i40e_delete_clsflower()
8943 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_delete_clsflower()
8944 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; in i40e_delete_clsflower()
8945 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; in i40e_delete_clsflower()
9025 struct i40e_pf *pf = vsi->back; in i40e_open() local
9029 if (test_bit(__I40E_TESTING, pf->state) || in i40e_open()
9030 test_bit(__I40E_BAD_EEPROM, pf->state)) in i40e_open()
9035 if (i40e_force_link_state(pf, true)) in i40e_open()
9043 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | in i40e_open()
9045 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | in i40e_open()
9048 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); in i40e_open()
9087 struct i40e_pf *pf = vsi->back; in i40e_vsi_open() local
9105 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); in i40e_vsi_open()
9117 dev_driver_string(&pf->pdev->dev), in i40e_vsi_open()
9118 dev_name(&pf->pdev->dev)); in i40e_vsi_open()
9142 if (vsi == pf->vsi[pf->lan_vsi]) in i40e_vsi_open()
9143 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); in i40e_vsi_open()
9150 * @pf: Pointer to PF
9155 static void i40e_fdir_filter_exit(struct i40e_pf *pf) in i40e_fdir_filter_exit() argument
9162 &pf->fdir_filter_list, fdir_node) { in i40e_fdir_filter_exit()
9167 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) { in i40e_fdir_filter_exit()
9171 INIT_LIST_HEAD(&pf->l3_flex_pit_list); in i40e_fdir_filter_exit()
9173 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) { in i40e_fdir_filter_exit()
9177 INIT_LIST_HEAD(&pf->l4_flex_pit_list); in i40e_fdir_filter_exit()
9179 pf->fdir_pf_active_filters = 0; in i40e_fdir_filter_exit()
9180 i40e_reset_fdir_filter_cnt(pf); in i40e_fdir_filter_exit()
9183 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP, in i40e_fdir_filter_exit()
9188 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP, in i40e_fdir_filter_exit()
9193 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP, in i40e_fdir_filter_exit()
9198 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP, in i40e_fdir_filter_exit()
9203 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, in i40e_fdir_filter_exit()
9208 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP, in i40e_fdir_filter_exit()
9213 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, in i40e_fdir_filter_exit()
9216 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4, in i40e_fdir_filter_exit()
9220 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER, in i40e_fdir_filter_exit()
9223 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6, in i40e_fdir_filter_exit()
9229 * @pf: Pointer to PF
9234 static void i40e_cloud_filter_exit(struct i40e_pf *pf) in i40e_cloud_filter_exit() argument
9240 &pf->cloud_filter_list, cloud_node) { in i40e_cloud_filter_exit()
9244 pf->num_cloud_filters = 0; in i40e_cloud_filter_exit()
9246 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && in i40e_cloud_filter_exit()
9247 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { in i40e_cloud_filter_exit()
9248 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_cloud_filter_exit()
9249 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; in i40e_cloud_filter_exit()
9250 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; in i40e_cloud_filter_exit()
9275 * i40e_do_reset - Start a PF or Core Reset sequence
9276 * @pf: board private structure
9281 * The essential difference in resets is that the PF Reset
9285 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) in i40e_do_reset() argument
9300 dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); in i40e_do_reset()
9301 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_do_reset()
9303 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); in i40e_do_reset()
9311 dev_dbg(&pf->pdev->dev, "CoreR requested\n"); in i40e_do_reset()
9312 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_do_reset()
9314 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); in i40e_do_reset()
9315 i40e_flush(&pf->hw); in i40e_do_reset()
9319 /* Request a PF Reset in i40e_do_reset()
9321 * Resets only the PF-specific registers in i40e_do_reset()
9327 dev_dbg(&pf->pdev->dev, "PFR requested\n"); in i40e_do_reset()
9328 i40e_handle_reset_warning(pf, lock_acquired); in i40e_do_reset()
9331 /* Request a PF Reset in i40e_do_reset()
9333 * Resets the PF and reinitializes the PF's VSI. in i40e_do_reset()
9335 i40e_prep_for_reset(pf); in i40e_do_reset()
9336 i40e_reset_and_rebuild(pf, true, lock_acquired); in i40e_do_reset()
9337 dev_info(&pf->pdev->dev, in i40e_do_reset()
9338 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ? in i40e_do_reset()
9346 dev_info(&pf->pdev->dev, in i40e_do_reset()
9348 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_do_reset()
9349 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_do_reset()
9354 i40e_vsi_reinit_locked(pf->vsi[v]); in i40e_do_reset()
9360 dev_info(&pf->pdev->dev, "VSI down requested\n"); in i40e_do_reset()
9361 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_do_reset()
9362 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_do_reset()
9372 dev_info(&pf->pdev->dev, in i40e_do_reset()
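
i40e_do_reset() dispatches on a bitmask of requested reset types, from the most drastic (GlobalR) down to a plain VSI down. The sketch below mimics only that dispatch shape in plain C; the DEMO_* flags and the printed actions are illustrative placeholders, not the real I40E_*_FLAG values or GLGEN_RTRIG register writes.

    #include <stdio.h>

    /* hypothetical stand-ins for the reset request flags */
    #define DEMO_GLOBAL_RESET_FLAG  (1u << 0)
    #define DEMO_CORE_RESET_FLAG    (1u << 1)
    #define DEMO_PF_RESET_FLAG      (1u << 2)
    #define DEMO_REINIT_FLAG        (1u << 3)
    #define DEMO_DOWN_FLAG          (1u << 4)

    static void demo_do_reset(unsigned int reset_flags)
    {
        if (reset_flags & DEMO_GLOBAL_RESET_FLAG)
            puts("GlobalR requested: reset the whole device");
        else if (reset_flags & DEMO_CORE_RESET_FLAG)
            puts("CoreR requested: reset the core, keep PCI config");
        else if (reset_flags & DEMO_PF_RESET_FLAG)
            puts("PFR requested: reset only PF-specific registers");
        else if (reset_flags & DEMO_REINIT_FLAG)
            puts("reinit requested: rebuild each VSI in place");
        else if (reset_flags & DEMO_DOWN_FLAG)
            puts("VSI down requested");
        else
            printf("bad reset request 0x%08x\n", reset_flags);
    }

    int main(void)
    {
        demo_do_reset(DEMO_PF_RESET_FLAG);
        return 0;
    }
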
9380 * @pf: board private structure
9384 bool i40e_dcb_need_reconfig(struct i40e_pf *pf, in i40e_dcb_need_reconfig() argument
9399 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); in i40e_dcb_need_reconfig()
9405 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); in i40e_dcb_need_reconfig()
9410 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); in i40e_dcb_need_reconfig()
9418 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); in i40e_dcb_need_reconfig()
9426 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); in i40e_dcb_need_reconfig()
9429 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); in i40e_dcb_need_reconfig()
9435 * @pf: board private structure
9438 static int i40e_handle_lldp_event(struct i40e_pf *pf, in i40e_handle_lldp_event() argument
9443 struct i40e_hw *hw = &pf->hw; in i40e_handle_lldp_event()
9453 !(pf->flags & I40E_FLAG_DCB_CAPABLE)) in i40e_handle_lldp_event()
9455 pf->flags |= I40E_FLAG_DCB_CAPABLE; in i40e_handle_lldp_event()
9458 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) in i40e_handle_lldp_event()
9464 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type); in i40e_handle_lldp_event()
9470 dev_dbg(&pf->pdev->dev, in i40e_handle_lldp_event()
9486 ret = i40e_get_dcb_config(&pf->hw); in i40e_handle_lldp_event()
9492 dev_warn(&pf->pdev->dev, in i40e_handle_lldp_event()
9494 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; in i40e_handle_lldp_event()
9496 dev_info(&pf->pdev->dev, in i40e_handle_lldp_event()
9499 i40e_aq_str(&pf->hw, in i40e_handle_lldp_event()
9500 pf->hw.aq.asq_last_status)); in i40e_handle_lldp_event()
9508 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); in i40e_handle_lldp_event()
9512 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, in i40e_handle_lldp_event()
9515 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); in i40e_handle_lldp_event()
9522 pf->flags |= I40E_FLAG_DCB_ENABLED; in i40e_handle_lldp_event()
9524 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_handle_lldp_event()
9526 set_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_handle_lldp_event()
9528 i40e_pf_quiesce_all_vsi(pf); in i40e_handle_lldp_event()
9531 i40e_dcb_reconfigure(pf); in i40e_handle_lldp_event()
9533 ret = i40e_resume_port_tx(pf); in i40e_handle_lldp_event()
9535 clear_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_handle_lldp_event()
9540 /* Wait for the PF's queues to be disabled */ in i40e_handle_lldp_event()
9541 ret = i40e_pf_wait_queues_disabled(pf); in i40e_handle_lldp_event()
9543 /* Schedule PF reset to recover */ in i40e_handle_lldp_event()
9544 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_handle_lldp_event()
9545 i40e_service_event_schedule(pf); in i40e_handle_lldp_event()
9547 i40e_pf_unquiesce_all_vsi(pf); in i40e_handle_lldp_event()
9548 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_handle_lldp_event()
9549 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_handle_lldp_event()
9559 * @pf: board private structure
9563 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags) in i40e_do_reset_safe() argument
9566 i40e_do_reset(pf, reset_flags, true); in i40e_do_reset_safe()
9572 * @pf: board private structure
9575 * Handler for LAN Queue Overflow Event generated by the firmware for PF
9578 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, in i40e_handle_lan_overflow_event() argument
9585 struct i40e_hw *hw = &pf->hw; in i40e_handle_lan_overflow_event()
9589 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", in i40e_handle_lan_overflow_event()
9598 vf = &pf->vf[vf_id]; in i40e_handle_lan_overflow_event()
9608 * @pf: board private structure
9610 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) in i40e_get_cur_guaranteed_fd_count() argument
9614 val = rd32(&pf->hw, I40E_PFQF_FDSTAT); in i40e_get_cur_guaranteed_fd_count()
9620 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
9621 * @pf: board private structure
9623 u32 i40e_get_current_fd_count(struct i40e_pf *pf) in i40e_get_current_fd_count() argument
9627 val = rd32(&pf->hw, I40E_PFQF_FDSTAT); in i40e_get_current_fd_count()
9636 * @pf: board private structure
9638 u32 i40e_get_global_fd_count(struct i40e_pf *pf) in i40e_get_global_fd_count() argument
9642 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); in i40e_get_global_fd_count()
9651 * @pf: board private structure
9653 static void i40e_reenable_fdir_sb(struct i40e_pf *pf) in i40e_reenable_fdir_sb() argument
9655 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) in i40e_reenable_fdir_sb()
9656 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && in i40e_reenable_fdir_sb()
9657 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_reenable_fdir_sb()
9658 …dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now… in i40e_reenable_fdir_sb()
9663 * @pf: board private structure
9665 static void i40e_reenable_fdir_atr(struct i40e_pf *pf) in i40e_reenable_fdir_atr() argument
9667 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) { in i40e_reenable_fdir_atr()
9673 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP, in i40e_reenable_fdir_atr()
9677 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && in i40e_reenable_fdir_atr()
9678 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_reenable_fdir_atr()
9679 …dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no c… in i40e_reenable_fdir_atr()
9685 * @pf: board private structure
9688 static void i40e_delete_invalid_filter(struct i40e_pf *pf, in i40e_delete_invalid_filter() argument
9692 pf->fdir_pf_active_filters--; in i40e_delete_invalid_filter()
9693 pf->fd_inv = 0; in i40e_delete_invalid_filter()
9697 pf->fd_tcp4_filter_cnt--; in i40e_delete_invalid_filter()
9700 pf->fd_udp4_filter_cnt--; in i40e_delete_invalid_filter()
9703 pf->fd_sctp4_filter_cnt--; in i40e_delete_invalid_filter()
9706 pf->fd_tcp6_filter_cnt--; in i40e_delete_invalid_filter()
9709 pf->fd_udp6_filter_cnt--; in i40e_delete_invalid_filter()
9712 pf->fd_udp6_filter_cnt--; in i40e_delete_invalid_filter()
9717 pf->fd_tcp4_filter_cnt--; in i40e_delete_invalid_filter()
9720 pf->fd_udp4_filter_cnt--; in i40e_delete_invalid_filter()
9723 pf->fd_sctp4_filter_cnt--; in i40e_delete_invalid_filter()
9726 pf->fd_ip4_filter_cnt--; in i40e_delete_invalid_filter()
9733 pf->fd_tcp6_filter_cnt--; in i40e_delete_invalid_filter()
9736 pf->fd_udp6_filter_cnt--; in i40e_delete_invalid_filter()
9739 pf->fd_sctp6_filter_cnt--; in i40e_delete_invalid_filter()
9742 pf->fd_ip6_filter_cnt--; in i40e_delete_invalid_filter()
9755 * @pf: board private structure
9757 void i40e_fdir_check_and_reenable(struct i40e_pf *pf) in i40e_fdir_check_and_reenable() argument
9763 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_fdir_check_and_reenable()
9767 fcnt_prog = i40e_get_global_fd_count(pf); in i40e_fdir_check_and_reenable()
9768 fcnt_avail = pf->fdir_pf_filter_count; in i40e_fdir_check_and_reenable()
9770 (pf->fd_add_err == 0) || in i40e_fdir_check_and_reenable()
9771 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) in i40e_fdir_check_and_reenable()
9772 i40e_reenable_fdir_sb(pf); in i40e_fdir_check_and_reenable()
9779 pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0) in i40e_fdir_check_and_reenable()
9780 i40e_reenable_fdir_atr(pf); in i40e_fdir_check_and_reenable()
9783 if (pf->fd_inv > 0) { in i40e_fdir_check_and_reenable()
9785 &pf->fdir_filter_list, fdir_node) in i40e_fdir_check_and_reenable()
9786 if (filter->fd_id == pf->fd_inv) in i40e_fdir_check_and_reenable()
9787 i40e_delete_invalid_filter(pf, filter); in i40e_fdir_check_and_reenable()
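
i40e_fdir_check_and_reenable() re-arms the auto-disabled sideband/ATR paths once the Flow Director table has room again. A simplified sketch of that headroom test follows, with an invented slack constant rather than the driver's real threshold.

    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_FD_HEAD_ROOM 32u   /* illustrative slack, not the driver's value */

    /* Re-enable an auto-disabled feature once usage drops comfortably
     * below capacity (capacity minus a fixed head room). */
    static bool demo_can_reenable(unsigned int programmed, unsigned int capacity)
    {
        if (capacity <= DEMO_FD_HEAD_ROOM)
            return false;
        return programmed < capacity - DEMO_FD_HEAD_ROOM;
    }

    int main(void)
    {
        printf("400/512 -> %d\n", demo_can_reenable(400, 512));  /* 1: room again */
        printf("500/512 -> %d\n", demo_can_reenable(500, 512));  /* 0: still tight */
        return 0;
    }
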
9795 * @pf: board private structure
9797 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) in i40e_fdir_flush_and_replay() argument
9805 if (!time_after(jiffies, pf->fd_flush_timestamp + in i40e_fdir_flush_and_replay()
9812 min_flush_time = pf->fd_flush_timestamp + in i40e_fdir_flush_and_replay()
9814 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; in i40e_fdir_flush_and_replay()
9818 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fdir_flush_and_replay()
9819 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); in i40e_fdir_flush_and_replay()
9823 pf->fd_flush_timestamp = jiffies; in i40e_fdir_flush_and_replay()
9824 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_fdir_flush_and_replay()
9826 wr32(&pf->hw, I40E_PFQF_CTL_1, in i40e_fdir_flush_and_replay()
9828 i40e_flush(&pf->hw); in i40e_fdir_flush_and_replay()
9829 pf->fd_flush_cnt++; in i40e_fdir_flush_and_replay()
9830 pf->fd_add_err = 0; in i40e_fdir_flush_and_replay()
9834 reg = rd32(&pf->hw, I40E_PFQF_CTL_1); in i40e_fdir_flush_and_replay()
9839 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); in i40e_fdir_flush_and_replay()
9842 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); in i40e_fdir_flush_and_replay()
9843 if (!disable_atr && !pf->fd_tcp4_filter_cnt) in i40e_fdir_flush_and_replay()
9844 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_fdir_flush_and_replay()
9845 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); in i40e_fdir_flush_and_replay()
9846 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fdir_flush_and_replay()
9847 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); in i40e_fdir_flush_and_replay()
9853 * @pf: board private structure
9855 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) in i40e_get_current_atr_cnt() argument
9857 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; in i40e_get_current_atr_cnt()
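
i40e_get_current_atr_cnt() is pure arithmetic: ATR filters are whatever the hardware reports as programmed Flow Director filters minus the filters added explicitly through the sideband path. A tiny standalone illustration:

    #include <stdio.h>

    /* ATR filters = programmed total minus the explicit sideband filters. */
    static unsigned int demo_atr_cnt(unsigned int total_fd, unsigned int sb_active)
    {
        return total_fd - sb_active;
    }

    int main(void)
    {
        /* e.g. 150 filters programmed, 40 added via sideband -> 110 came from ATR */
        printf("%u\n", demo_atr_cnt(150, 40));
        return 0;
    }
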
9862 * @pf: board private structure
9864 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) in i40e_fdir_reinit_subtask() argument
9868 if (test_bit(__I40E_DOWN, pf->state)) in i40e_fdir_reinit_subtask()
9871 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_fdir_reinit_subtask()
9872 i40e_fdir_flush_and_replay(pf); in i40e_fdir_reinit_subtask()
9874 i40e_fdir_check_and_reenable(pf); in i40e_fdir_reinit_subtask()
9920 struct i40e_pf *pf; in i40e_veb_link_event() local
9923 if (!veb || !veb->pf) in i40e_veb_link_event()
9925 pf = veb->pf; in i40e_veb_link_event()
9929 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) in i40e_veb_link_event()
9930 i40e_veb_link_event(pf->veb[i], link_up); in i40e_veb_link_event()
9933 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_veb_link_event()
9934 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) in i40e_veb_link_event()
9935 i40e_vsi_link_event(pf->vsi[i], link_up); in i40e_veb_link_event()
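
i40e_veb_link_event() fans a link change out through the switch hierarchy: recursively into every child VEB whose uplink SEID is this VEB, then to every VSI hanging off it. A compact userspace sketch of that propagation over a flat array keyed by uplink id; the node layout and names are invented for illustration (the driver keeps VEBs and VSIs in separate arrays).

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_node {
        int id;          /* this node's SEID-like identifier      */
        int uplink;      /* id of the parent node, 0 = none       */
        bool is_switch;  /* true: VEB-like, recurse; false: VSI   */
    };

    static struct demo_node nodes[] = {
        { .id = 1, .uplink = 0, .is_switch = true  },   /* root VEB  */
        { .id = 2, .uplink = 1, .is_switch = true  },   /* child VEB */
        { .id = 3, .uplink = 1, .is_switch = false },   /* VSI on 1  */
        { .id = 4, .uplink = 2, .is_switch = false },   /* VSI on 2  */
    };

    static void demo_link_event(int id, bool link_up)
    {
        unsigned int i;

        for (i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
            if (nodes[i].uplink != id)
                continue;
            if (nodes[i].is_switch)
                demo_link_event(nodes[i].id, link_up);   /* recurse into VEB */
            else
                printf("VSI %d link %s\n", nodes[i].id,
                       link_up ? "up" : "down");         /* notify the VSI   */
        }
    }

    int main(void)
    {
        demo_link_event(1, true);
        return 0;
    }
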
9940 * @pf: board private structure
9942 static void i40e_link_event(struct i40e_pf *pf) in i40e_link_event() argument
9944 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_link_event()
9953 pf->hw.phy.get_link_info = true; in i40e_link_event()
9954 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); in i40e_link_event()
9955 status = i40e_get_link_status(&pf->hw, &new_link); in i40e_link_event()
9959 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state); in i40e_link_event()
9964 set_bit(__I40E_TEMP_LINK_POLLING, pf->state); in i40e_link_event()
9965 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", in i40e_link_event()
9970 old_link_speed = pf->hw.phy.link_info_old.link_speed; in i40e_link_event()
9971 new_link_speed = pf->hw.phy.link_info.link_speed; in i40e_link_event()
9984 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) in i40e_link_event()
9985 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); in i40e_link_event()
9989 if (pf->vf) in i40e_link_event()
9990 i40e_vc_notify_link_state(pf); in i40e_link_event()
9992 if (pf->flags & I40E_FLAG_PTP) in i40e_link_event()
9993 i40e_ptp_set_increment(pf); in i40e_link_event()
9998 if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) in i40e_link_event()
10005 dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n"); in i40e_link_event()
10006 memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg)); in i40e_link_event()
10007 err = i40e_dcb_sw_default_config(pf); in i40e_link_event()
10009 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | in i40e_link_event()
10012 pf->dcbx_cap = DCB_CAP_DCBX_HOST | in i40e_link_event()
10014 pf->flags |= I40E_FLAG_DCB_CAPABLE; in i40e_link_event()
10015 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_link_event()
10023 * @pf: board private structure
10025 static void i40e_watchdog_subtask(struct i40e_pf *pf) in i40e_watchdog_subtask() argument
10030 if (test_bit(__I40E_DOWN, pf->state) || in i40e_watchdog_subtask()
10031 test_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_watchdog_subtask()
10035 if (time_before(jiffies, (pf->service_timer_previous + in i40e_watchdog_subtask()
10036 pf->service_timer_period))) in i40e_watchdog_subtask()
10038 pf->service_timer_previous = jiffies; in i40e_watchdog_subtask()
10040 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) || in i40e_watchdog_subtask()
10041 test_bit(__I40E_TEMP_LINK_POLLING, pf->state)) in i40e_watchdog_subtask()
10042 i40e_link_event(pf); in i40e_watchdog_subtask()
10047 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_watchdog_subtask()
10048 if (pf->vsi[i] && pf->vsi[i]->netdev) in i40e_watchdog_subtask()
10049 i40e_update_stats(pf->vsi[i]); in i40e_watchdog_subtask()
10051 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { in i40e_watchdog_subtask()
10054 if (pf->veb[i]) in i40e_watchdog_subtask()
10055 i40e_update_veb_stats(pf->veb[i]); in i40e_watchdog_subtask()
10058 i40e_ptp_rx_hang(pf); in i40e_watchdog_subtask()
10059 i40e_ptp_tx_hang(pf); in i40e_watchdog_subtask()
10064 * @pf: board private structure
10066 static void i40e_reset_subtask(struct i40e_pf *pf) in i40e_reset_subtask() argument
10070 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) { in i40e_reset_subtask()
10072 clear_bit(__I40E_REINIT_REQUESTED, pf->state); in i40e_reset_subtask()
10074 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) { in i40e_reset_subtask()
10076 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_reset_subtask()
10078 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) { in i40e_reset_subtask()
10080 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_reset_subtask()
10082 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) { in i40e_reset_subtask()
10084 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); in i40e_reset_subtask()
10086 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) { in i40e_reset_subtask()
10088 clear_bit(__I40E_DOWN_REQUESTED, pf->state); in i40e_reset_subtask()
10094 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { in i40e_reset_subtask()
10095 i40e_prep_for_reset(pf); in i40e_reset_subtask()
10096 i40e_reset(pf); in i40e_reset_subtask()
10097 i40e_rebuild(pf, false, false); in i40e_reset_subtask()
10102 !test_bit(__I40E_DOWN, pf->state) && in i40e_reset_subtask()
10103 !test_bit(__I40E_CONFIG_BUSY, pf->state)) { in i40e_reset_subtask()
10104 i40e_do_reset(pf, reset_flags, false); in i40e_reset_subtask()
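
i40e_reset_subtask() first converts the sticky *_REQUESTED state bits into a single reset_flags mask, clearing each bit as it is collected, and only afterwards decides whether and how to reset. A sketch of that collect-and-clear step with invented bit names:

    #include <stdio.h>

    #define DEMO_REINIT_REQUESTED     (1u << 0)
    #define DEMO_PF_RESET_REQUESTED   (1u << 1)
    #define DEMO_CORE_RESET_REQUESTED (1u << 2)
    #define DEMO_DOWN_REQUESTED       (1u << 3)

    /* Move every pending request bit from *state into a local mask,
     * clearing it in *state so each request is consumed exactly once. */
    static unsigned int demo_collect_reset_requests(unsigned int *state)
    {
        unsigned int pending = 0;
        unsigned int bit;

        for (bit = DEMO_REINIT_REQUESTED; bit <= DEMO_DOWN_REQUESTED; bit <<= 1) {
            if (*state & bit) {
                pending |= bit;
                *state &= ~bit;
            }
        }
        return pending;
    }

    int main(void)
    {
        unsigned int state = DEMO_PF_RESET_REQUESTED | DEMO_DOWN_REQUESTED;
        unsigned int flags = demo_collect_reset_requests(&state);

        printf("flags 0x%x, state 0x%x\n", flags, state);  /* flags 0xa, state 0x0 */
        return 0;
    }
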
10110 * @pf: board private structure
10113 static void i40e_handle_link_event(struct i40e_pf *pf, in i40e_handle_link_event() argument
10125 i40e_link_event(pf); in i40e_handle_link_event()
10129 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
10131 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
10140 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) { in i40e_handle_link_event()
10141 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
10143 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
10151 * @pf: board private structure
10153 static void i40e_clean_adminq_subtask(struct i40e_pf *pf) in i40e_clean_adminq_subtask() argument
10156 struct i40e_hw *hw = &pf->hw; in i40e_clean_adminq_subtask()
10163 /* Do not run clean AQ when PF reset fails */ in i40e_clean_adminq_subtask()
10164 if (test_bit(__I40E_RESET_FAILED, pf->state)) in i40e_clean_adminq_subtask()
10168 val = rd32(&pf->hw, pf->hw.aq.arq.len); in i40e_clean_adminq_subtask()
10172 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); in i40e_clean_adminq_subtask()
10177 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); in i40e_clean_adminq_subtask()
10179 pf->arq_overflows++; in i40e_clean_adminq_subtask()
10183 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); in i40e_clean_adminq_subtask()
10187 wr32(&pf->hw, pf->hw.aq.arq.len, val); in i40e_clean_adminq_subtask()
10189 val = rd32(&pf->hw, pf->hw.aq.asq.len); in i40e_clean_adminq_subtask()
10192 if (pf->hw.debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
10193 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); in i40e_clean_adminq_subtask()
10197 if (pf->hw.debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
10198 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); in i40e_clean_adminq_subtask()
10202 if (pf->hw.debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
10203 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); in i40e_clean_adminq_subtask()
10207 wr32(&pf->hw, pf->hw.aq.asq.len, val); in i40e_clean_adminq_subtask()
10219 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); in i40e_clean_adminq_subtask()
10228 i40e_handle_link_event(pf, &event); in i40e_clean_adminq_subtask()
10232 ret = i40e_vc_process_vf_msg(pf, in i40e_clean_adminq_subtask()
10240 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); in i40e_clean_adminq_subtask()
10243 i40e_handle_lldp_event(pf, &event); in i40e_clean_adminq_subtask()
10248 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); in i40e_clean_adminq_subtask()
10249 i40e_handle_lan_overflow_event(pf, &event); in i40e_clean_adminq_subtask()
10252 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); in i40e_clean_adminq_subtask()
10257 i40e_debug(&pf->hw, I40E_DEBUG_NVM, in i40e_clean_adminq_subtask()
10262 dev_info(&pf->pdev->dev, in i40e_clean_adminq_subtask()
10267 } while (i++ < pf->adminq_work_limit); in i40e_clean_adminq_subtask()
10269 if (i < pf->adminq_work_limit) in i40e_clean_adminq_subtask()
10270 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); in i40e_clean_adminq_subtask()
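
The ARQ/ASQ handling at the top of i40e_clean_adminq_subtask() follows a read, report, clear, write-back-if-changed pattern for the latched error bits in the queue length registers. A register-free sketch of the same pattern, with invented bit positions:

    #include <stdio.h>

    #define DEMO_ERR_VF        (1u << 28)
    #define DEMO_ERR_OVERFLOW  (1u << 29)
    #define DEMO_ERR_CRITICAL  (1u << 30)

    /* Report and clear any latched error bits; return the cleaned value. */
    static unsigned int demo_handle_queue_errors(unsigned int val)
    {
        if (val & DEMO_ERR_VF) {
            puts("VF error detected");
            val &= ~DEMO_ERR_VF;
        }
        if (val & DEMO_ERR_OVERFLOW) {
            puts("overflow error detected");
            val &= ~DEMO_ERR_OVERFLOW;
        }
        if (val & DEMO_ERR_CRITICAL) {
            puts("critical error detected");
            val &= ~DEMO_ERR_CRITICAL;
        }
        return val;
    }

    int main(void)
    {
        unsigned int before = 0x100u | DEMO_ERR_OVERFLOW;  /* length bits + one error */
        unsigned int after = demo_handle_queue_errors(before);

        if (after != before)
            printf("write back 0x%08x\n", after);   /* only if something was cleared */
        return 0;
    }
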
10283 * @pf: board private structure
10285 static void i40e_verify_eeprom(struct i40e_pf *pf) in i40e_verify_eeprom() argument
10289 err = i40e_diag_eeprom_test(&pf->hw); in i40e_verify_eeprom()
10292 err = i40e_diag_eeprom_test(&pf->hw); in i40e_verify_eeprom()
10294 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", in i40e_verify_eeprom()
10296 set_bit(__I40E_BAD_EEPROM, pf->state); in i40e_verify_eeprom()
10300 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_verify_eeprom()
10301 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); in i40e_verify_eeprom()
10302 clear_bit(__I40E_BAD_EEPROM, pf->state); in i40e_verify_eeprom()
10308 * @pf: pointer to the PF structure
10312 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) in i40e_enable_pf_switch_lb() argument
10314 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_enable_pf_switch_lb()
10318 ctxt.seid = pf->main_vsi_seid; in i40e_enable_pf_switch_lb()
10319 ctxt.pf_num = pf->hw.pf_id; in i40e_enable_pf_switch_lb()
10321 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_enable_pf_switch_lb()
10323 dev_info(&pf->pdev->dev, in i40e_enable_pf_switch_lb()
10324 "couldn't get PF vsi config, err %pe aq_err %s\n", in i40e_enable_pf_switch_lb()
10326 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_enable_pf_switch_lb()
10335 dev_info(&pf->pdev->dev, in i40e_enable_pf_switch_lb()
10338 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_enable_pf_switch_lb()
10344 * @pf: pointer to the PF structure
10348 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) in i40e_disable_pf_switch_lb() argument
10350 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_disable_pf_switch_lb()
10354 ctxt.seid = pf->main_vsi_seid; in i40e_disable_pf_switch_lb()
10355 ctxt.pf_num = pf->hw.pf_id; in i40e_disable_pf_switch_lb()
10357 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_disable_pf_switch_lb()
10359 dev_info(&pf->pdev->dev, in i40e_disable_pf_switch_lb()
10360 "couldn't get PF vsi config, err %pe aq_err %s\n", in i40e_disable_pf_switch_lb()
10362 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_disable_pf_switch_lb()
10371 dev_info(&pf->pdev->dev, in i40e_disable_pf_switch_lb()
10374 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_disable_pf_switch_lb()
10388 struct i40e_pf *pf = veb->pf; in i40e_config_bridge_mode() local
10390 if (pf->hw.debug_mask & I40E_DEBUG_LAN) in i40e_config_bridge_mode()
10391 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", in i40e_config_bridge_mode()
10394 i40e_disable_pf_switch_lb(pf); in i40e_config_bridge_mode()
10396 i40e_enable_pf_switch_lb(pf); in i40e_config_bridge_mode()
10411 struct i40e_pf *pf = veb->pf; in i40e_reconstitute_veb() local
10416 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { in i40e_reconstitute_veb()
10417 if (pf->vsi[v] && in i40e_reconstitute_veb()
10418 pf->vsi[v]->veb_idx == veb->idx && in i40e_reconstitute_veb()
10419 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { in i40e_reconstitute_veb()
10420 ctl_vsi = pf->vsi[v]; in i40e_reconstitute_veb()
10425 dev_info(&pf->pdev->dev, in i40e_reconstitute_veb()
10430 if (ctl_vsi != pf->vsi[pf->lan_vsi]) in i40e_reconstitute_veb()
10431 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_reconstitute_veb()
10434 dev_info(&pf->pdev->dev, in i40e_reconstitute_veb()
10446 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) in i40e_reconstitute_veb()
10453 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_reconstitute_veb()
10454 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) in i40e_reconstitute_veb()
10457 if (pf->vsi[v]->veb_idx == veb->idx) { in i40e_reconstitute_veb()
10458 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_reconstitute_veb()
10463 dev_info(&pf->pdev->dev, in i40e_reconstitute_veb()
10474 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { in i40e_reconstitute_veb()
10475 pf->veb[veb_idx]->uplink_seid = veb->seid; in i40e_reconstitute_veb()
10476 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); in i40e_reconstitute_veb()
10488 * @pf: the PF struct
10491 static int i40e_get_capabilities(struct i40e_pf *pf, in i40e_get_capabilities() argument
10506 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, in i40e_get_capabilities()
10512 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { in i40e_get_capabilities()
10515 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) { in i40e_get_capabilities()
10516 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10519 i40e_aq_str(&pf->hw, in i40e_get_capabilities()
10520 pf->hw.aq.asq_last_status)); in i40e_get_capabilities()
10525 if (pf->hw.debug_mask & I40E_DEBUG_USER) { in i40e_get_capabilities()
10527 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10528 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", in i40e_get_capabilities()
10529 pf->hw.pf_id, pf->hw.func_caps.num_vfs, in i40e_get_capabilities()
10530 pf->hw.func_caps.num_msix_vectors, in i40e_get_capabilities()
10531 pf->hw.func_caps.num_msix_vectors_vf, in i40e_get_capabilities()
10532 pf->hw.func_caps.fd_filters_guaranteed, in i40e_get_capabilities()
10533 pf->hw.func_caps.fd_filters_best_effort, in i40e_get_capabilities()
10534 pf->hw.func_caps.num_tx_qp, in i40e_get_capabilities()
10535 pf->hw.func_caps.num_vsis); in i40e_get_capabilities()
10537 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10539 pf->hw.dev_caps.switch_mode, in i40e_get_capabilities()
10540 pf->hw.dev_caps.valid_functions); in i40e_get_capabilities()
10541 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10543 pf->hw.dev_caps.sr_iov_1_1, in i40e_get_capabilities()
10544 pf->hw.dev_caps.num_vfs); in i40e_get_capabilities()
10545 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10547 pf->hw.dev_caps.num_vsis, in i40e_get_capabilities()
10548 pf->hw.dev_caps.num_rx_qp, in i40e_get_capabilities()
10549 pf->hw.dev_caps.num_tx_qp); in i40e_get_capabilities()
10553 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ in i40e_get_capabilities()
10554 + pf->hw.func_caps.num_vfs) in i40e_get_capabilities()
10555 if (pf->hw.revision_id == 0 && in i40e_get_capabilities()
10556 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) { in i40e_get_capabilities()
10557 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10559 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); in i40e_get_capabilities()
10560 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; in i40e_get_capabilities()
10570 * @pf: board private structure
10572 static void i40e_fdir_sb_setup(struct i40e_pf *pf) in i40e_fdir_sb_setup() argument
10579 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { in i40e_fdir_sb_setup()
10588 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); in i40e_fdir_sb_setup()
10591 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) in i40e_fdir_sb_setup()
10595 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); in i40e_fdir_sb_setup()
10599 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, in i40e_fdir_sb_setup()
10600 pf->vsi[pf->lan_vsi]->seid, 0); in i40e_fdir_sb_setup()
10602 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); in i40e_fdir_sb_setup()
10603 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_fdir_sb_setup()
10604 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_fdir_sb_setup()
10614 * @pf: board private structure
10616 static void i40e_fdir_teardown(struct i40e_pf *pf) in i40e_fdir_teardown() argument
10620 i40e_fdir_filter_exit(pf); in i40e_fdir_teardown()
10621 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); in i40e_fdir_teardown()
10628 * @vsi: PF main vsi
10637 struct i40e_pf *pf = vsi->back; in i40e_rebuild_cloud_filters() local
10642 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, in i40e_rebuild_cloud_filters()
10654 dev_dbg(&pf->pdev->dev, in i40e_rebuild_cloud_filters()
10657 i40e_aq_str(&pf->hw, in i40e_rebuild_cloud_filters()
10658 pf->hw.aq.asq_last_status)); in i40e_rebuild_cloud_filters()
10667 * @vsi: PF main vsi
10743 * @pf: board private structure
10745 * Close up the VFs and other things in prep for PF Reset.
10747 static void i40e_prep_for_reset(struct i40e_pf *pf) in i40e_prep_for_reset() argument
10749 struct i40e_hw *hw = &pf->hw; in i40e_prep_for_reset()
10753 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state); in i40e_prep_for_reset()
10754 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_prep_for_reset()
10756 if (i40e_check_asq_alive(&pf->hw)) in i40e_prep_for_reset()
10757 i40e_vc_notify_reset(pf); in i40e_prep_for_reset()
10759 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); in i40e_prep_for_reset()
10762 i40e_pf_quiesce_all_vsi(pf); in i40e_prep_for_reset()
10764 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_prep_for_reset()
10765 if (pf->vsi[v]) { in i40e_prep_for_reset()
10766 i40e_clean_xps_state(pf->vsi[v]); in i40e_prep_for_reset()
10767 pf->vsi[v]->seid = 0; in i40e_prep_for_reset()
10771 i40e_shutdown_adminq(&pf->hw); in i40e_prep_for_reset()
10777 dev_warn(&pf->pdev->dev, in i40e_prep_for_reset()
10784 i40e_ptp_save_hw_time(pf); in i40e_prep_for_reset()
10789 * @pf: PF struct
10791 static void i40e_send_version(struct i40e_pf *pf) in i40e_send_version() argument
10800 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); in i40e_send_version()
10849 * i40e_reset - wait for a core reset to finish, then do a PF reset if no CoreR was seen
10850 * @pf: board private structure
10852 static int i40e_reset(struct i40e_pf *pf) in i40e_reset() argument
10854 struct i40e_hw *hw = &pf->hw; in i40e_reset()
10859 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); in i40e_reset()
10860 set_bit(__I40E_RESET_FAILED, pf->state); in i40e_reset()
10861 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); in i40e_reset()
10863 pf->pfr_count++; in i40e_reset()
10870 * @pf: board private structure
10875 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) in i40e_rebuild() argument
10877 const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf); in i40e_rebuild()
10878 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_rebuild()
10879 struct i40e_hw *hw = &pf->hw; in i40e_rebuild()
10884 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && in i40e_rebuild()
10886 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); in i40e_rebuild()
10888 if (test_bit(__I40E_DOWN, pf->state) && in i40e_rebuild()
10889 !test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_rebuild()
10891 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); in i40e_rebuild()
10894 ret = i40e_init_adminq(&pf->hw); in i40e_rebuild()
10896 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n", in i40e_rebuild()
10898 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
10901 i40e_get_oem_version(&pf->hw); in i40e_rebuild()
10903 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) { in i40e_rebuild()
10909 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) in i40e_rebuild()
10910 i40e_verify_eeprom(pf); in i40e_rebuild()
10916 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_rebuild()
10917 if (i40e_get_capabilities(pf, in i40e_rebuild()
10925 if (i40e_setup_misc_vector_for_recovery_mode(pf)) in i40e_rebuild()
10934 free_irq(pf->pdev->irq, pf); in i40e_rebuild()
10935 i40e_clear_interrupt_scheme(pf); in i40e_rebuild()
10936 if (i40e_restore_interrupt_scheme(pf)) in i40e_rebuild()
10941 i40e_send_version(pf); in i40e_rebuild()
10950 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities); in i40e_rebuild()
10957 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret); in i40e_rebuild()
10962 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret); in i40e_rebuild()
10971 if (i40e_is_tc_mqprio_enabled(pf)) { in i40e_rebuild()
10978 dev_warn(&pf->pdev->dev, in i40e_rebuild()
10980 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; in i40e_rebuild()
10983 ret = i40e_init_pf_dcb(pf); in i40e_rebuild()
10985 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", in i40e_rebuild()
10987 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; in i40e_rebuild()
10996 ret = i40e_setup_pf_switch(pf, reinit, true); in i40e_rebuild()
11003 ret = i40e_aq_set_phy_int_mask(&pf->hw, in i40e_rebuild()
11008 dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n", in i40e_rebuild()
11010 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
11017 * to recover minimal use by getting the basic PF VSI working. in i40e_rebuild()
11019 if (vsi->uplink_seid != pf->mac_seid) { in i40e_rebuild()
11020 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); in i40e_rebuild()
11023 if (!pf->veb[v]) in i40e_rebuild()
11026 if (pf->veb[v]->uplink_seid == pf->mac_seid || in i40e_rebuild()
11027 pf->veb[v]->uplink_seid == 0) { in i40e_rebuild()
11028 ret = i40e_reconstitute_veb(pf->veb[v]); in i40e_rebuild()
11035 * for minimal rebuild of PF VSI. in i40e_rebuild()
11039 if (pf->veb[v]->uplink_seid == pf->mac_seid) { in i40e_rebuild()
11040 dev_info(&pf->pdev->dev, in i40e_rebuild()
11041 "rebuild of switch failed: %d, will try to set up simple PF connection\n", in i40e_rebuild()
11043 vsi->uplink_seid = pf->mac_seid; in i40e_rebuild()
11045 } else if (pf->veb[v]->uplink_seid == 0) { in i40e_rebuild()
11046 dev_info(&pf->pdev->dev, in i40e_rebuild()
11054 if (vsi->uplink_seid == pf->mac_seid) { in i40e_rebuild()
11055 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); in i40e_rebuild()
11059 dev_info(&pf->pdev->dev, in i40e_rebuild()
11087 /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs in i40e_rebuild()
11108 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { in i40e_rebuild()
11110 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); in i40e_rebuild()
11112 dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n", in i40e_rebuild()
11114 i40e_aq_str(&pf->hw, in i40e_rebuild()
11115 pf->hw.aq.asq_last_status)); in i40e_rebuild()
11118 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_rebuild()
11119 ret = i40e_setup_misc_vector(pf); in i40e_rebuild()
11127 * PF/VF VSIs. in i40e_rebuild()
11130 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, in i40e_rebuild()
11131 pf->main_vsi_seid); in i40e_rebuild()
11134 i40e_pf_unquiesce_all_vsi(pf); in i40e_rebuild()
11141 ret = i40e_set_promiscuous(pf, pf->cur_promisc); in i40e_rebuild()
11143 dev_warn(&pf->pdev->dev, in i40e_rebuild()
11145 pf->cur_promisc ? "on" : "off", in i40e_rebuild()
11147 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
11149 i40e_reset_all_vfs(pf, true); in i40e_rebuild()
11152 i40e_send_version(pf); in i40e_rebuild()
11161 clear_bit(__I40E_RESET_FAILED, pf->state); in i40e_rebuild()
11163 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); in i40e_rebuild()
11164 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state); in i40e_rebuild()
11169 * @pf: board private structure
11174 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit, in i40e_reset_and_rebuild() argument
11179 if (test_bit(__I40E_IN_REMOVE, pf->state)) in i40e_reset_and_rebuild()
11185 ret = i40e_reset(pf); in i40e_reset_and_rebuild()
11187 i40e_rebuild(pf, reinit, lock_acquired); in i40e_reset_and_rebuild()
11191 * i40e_handle_reset_warning - prep the PF for reset, then reset and rebuild
11192 * @pf: board private structure
11199 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired) in i40e_handle_reset_warning() argument
11201 i40e_prep_for_reset(pf); in i40e_handle_reset_warning()
11202 i40e_reset_and_rebuild(pf, false, lock_acquired); in i40e_handle_reset_warning()
11207 * @pf: pointer to the PF structure
11211 static void i40e_handle_mdd_event(struct i40e_pf *pf) in i40e_handle_mdd_event() argument
11213 struct i40e_hw *hw = &pf->hw; in i40e_handle_mdd_event()
11219 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state)) in i40e_handle_mdd_event()
11233 pf->hw.func_caps.base_queue; in i40e_handle_mdd_event()
11234 if (netif_msg_tx_err(pf)) in i40e_handle_mdd_event()
11235 …dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x … in i40e_handle_mdd_event()
11248 pf->hw.func_caps.base_queue; in i40e_handle_mdd_event()
11249 if (netif_msg_rx_err(pf)) in i40e_handle_mdd_event()
11250 …dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02… in i40e_handle_mdd_event()
11260 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n"); in i40e_handle_mdd_event()
11265 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n"); in i40e_handle_mdd_event()
11270 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { in i40e_handle_mdd_event()
11271 vf = &(pf->vf[i]); in i40e_handle_mdd_event()
11276 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", in i40e_handle_mdd_event()
11278 dev_info(&pf->pdev->dev, in i40e_handle_mdd_event()
11279 "Use PF Control I/F to re-enable the VF\n"); in i40e_handle_mdd_event()
11287 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", in i40e_handle_mdd_event()
11289 dev_info(&pf->pdev->dev, in i40e_handle_mdd_event()
11290 "Use PF Control I/F to re-enable the VF\n"); in i40e_handle_mdd_event()
11296 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state); in i40e_handle_mdd_event()
11309 struct i40e_pf *pf = container_of(work, in i40e_service_task() local
11315 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_service_task()
11316 test_bit(__I40E_SUSPENDED, pf->state)) in i40e_service_task()
11319 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) in i40e_service_task()
11322 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_service_task()
11323 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]); in i40e_service_task()
11324 i40e_sync_filters_subtask(pf); in i40e_service_task()
11325 i40e_reset_subtask(pf); in i40e_service_task()
11326 i40e_handle_mdd_event(pf); in i40e_service_task()
11327 i40e_vc_process_vflr_event(pf); in i40e_service_task()
11328 i40e_watchdog_subtask(pf); in i40e_service_task()
11329 i40e_fdir_reinit_subtask(pf); in i40e_service_task()
11330 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) { in i40e_service_task()
11332 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], in i40e_service_task()
11335 i40e_client_subtask(pf); in i40e_service_task()
11337 pf->state)) in i40e_service_task()
11339 pf->vsi[pf->lan_vsi]); in i40e_service_task()
11341 i40e_sync_filters_subtask(pf); in i40e_service_task()
11343 i40e_reset_subtask(pf); in i40e_service_task()
11346 i40e_clean_adminq_subtask(pf); in i40e_service_task()
11350 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_service_task()
11356 if (time_after(jiffies, (start_time + pf->service_timer_period)) || in i40e_service_task()
11357 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) || in i40e_service_task()
11358 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) || in i40e_service_task()
11359 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) in i40e_service_task()
11360 i40e_service_event_schedule(pf); in i40e_service_task()
11369 struct i40e_pf *pf = from_timer(pf, t, service_timer); in i40e_service_timer() local
11371 mod_timer(&pf->service_timer, in i40e_service_timer()
11372 round_jiffies(jiffies + pf->service_timer_period)); in i40e_service_timer()
11373 i40e_service_event_schedule(pf); in i40e_service_timer()
11382 struct i40e_pf *pf = vsi->back; in i40e_set_num_rings_in_vsi() local
11386 vsi->alloc_queue_pairs = pf->num_lan_qps; in i40e_set_num_rings_in_vsi()
11393 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_set_num_rings_in_vsi()
11394 vsi->num_q_vectors = pf->num_lan_msix; in i40e_set_num_rings_in_vsi()
11406 vsi->num_q_vectors = pf->num_fdsb_msix; in i40e_set_num_rings_in_vsi()
11410 vsi->alloc_queue_pairs = pf->num_vmdq_qps; in i40e_set_num_rings_in_vsi()
11417 vsi->num_q_vectors = pf->num_vmdq_msix; in i40e_set_num_rings_in_vsi()
11421 vsi->alloc_queue_pairs = pf->num_vf_qps; in i40e_set_num_rings_in_vsi()
11487 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
11488 * @pf: board private structure
11492 * On success: returns vsi index in PF (positive)
11494 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) in i40e_vsi_mem_alloc() argument
11501 /* Need to protect the allocation of the VSIs at the PF level */ in i40e_vsi_mem_alloc()
11502 mutex_lock(&pf->switch_mutex); in i40e_vsi_mem_alloc()
11510 i = pf->next_vsi; in i40e_vsi_mem_alloc()
11511 while (i < pf->num_alloc_vsi && pf->vsi[i]) in i40e_vsi_mem_alloc()
11513 if (i >= pf->num_alloc_vsi) { in i40e_vsi_mem_alloc()
11515 while (i < pf->next_vsi && pf->vsi[i]) in i40e_vsi_mem_alloc()
11519 if (i < pf->num_alloc_vsi && !pf->vsi[i]) { in i40e_vsi_mem_alloc()
11525 pf->next_vsi = ++i; in i40e_vsi_mem_alloc()
11533 vsi->back = pf; in i40e_vsi_mem_alloc()
11539 pf->rss_table_size : 64; in i40e_vsi_mem_alloc()
11546 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); in i40e_vsi_mem_alloc()
11564 pf->vsi[vsi_idx] = vsi; in i40e_vsi_mem_alloc()
11570 pf->next_vsi = i - 1; in i40e_vsi_mem_alloc()
11573 mutex_unlock(&pf->switch_mutex); in i40e_vsi_mem_alloc()
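
The slot search in i40e_vsi_mem_alloc() starts at pf->next_vsi, runs to the end of the VSI array, wraps back to the start before giving up, and on success records the following index as the next hint. A self-contained sketch of that circular free-slot search; the array size and names are illustrative.

    #include <stdio.h>

    #define DEMO_MAX_VSI 8

    /* Find a free slot (used[i] == 0) starting at *next_hint, wrapping once.
     * Returns the slot index or -1; on success *next_hint points past it. */
    static int demo_find_free_slot(const int used[DEMO_MAX_VSI], int *next_hint)
    {
        int i = *next_hint;

        while (i < DEMO_MAX_VSI && used[i])
            i++;
        if (i >= DEMO_MAX_VSI) {          /* nothing past the hint: wrap around */
            i = 0;
            while (i < *next_hint && used[i])
                i++;
        }
        if (i < DEMO_MAX_VSI && !used[i]) {
            *next_hint = i + 1;           /* next search starts after this slot */
            return i;
        }
        return -1;                        /* out of slots */
    }

    int main(void)
    {
        int used[DEMO_MAX_VSI] = { 0, 0, 0, 0, 0, 1, 1, 1 };   /* tail occupied */
        int next = 5;

        printf("got slot %d\n", demo_find_free_slot(used, &next));  /* wraps to 0 */
        return 0;
    }
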
11621 struct i40e_pf *pf; in i40e_vsi_clear() local
11628 pf = vsi->back; in i40e_vsi_clear()
11630 mutex_lock(&pf->switch_mutex); in i40e_vsi_clear()
11631 if (!pf->vsi[vsi->idx]) { in i40e_vsi_clear()
11632 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n", in i40e_vsi_clear()
11637 if (pf->vsi[vsi->idx] != vsi) { in i40e_vsi_clear()
11638 dev_err(&pf->pdev->dev, in i40e_vsi_clear()
11639 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n", in i40e_vsi_clear()
11640 pf->vsi[vsi->idx]->idx, in i40e_vsi_clear()
11641 pf->vsi[vsi->idx]->type, in i40e_vsi_clear()
11646 /* updates the PF for this cleared vsi */ in i40e_vsi_clear()
11647 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); in i40e_vsi_clear()
11648 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); in i40e_vsi_clear()
11654 pf->vsi[vsi->idx] = NULL; in i40e_vsi_clear()
11655 if (vsi->idx < pf->next_vsi) in i40e_vsi_clear()
11656 pf->next_vsi = vsi->idx; in i40e_vsi_clear()
11659 mutex_unlock(&pf->switch_mutex); in i40e_vsi_clear()
11692 struct i40e_pf *pf = vsi->back; in i40e_alloc_rings() local
11707 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
11713 ring->itr_setting = pf->tx_itr_default; in i40e_alloc_rings()
11724 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
11731 ring->itr_setting = pf->tx_itr_default; in i40e_alloc_rings()
11740 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
11744 ring->itr_setting = pf->rx_itr_default; in i40e_alloc_rings()
11757 * @pf: board private structure
11762 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) in i40e_reserve_msix_vectors() argument
11764 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, in i40e_reserve_msix_vectors()
11767 dev_info(&pf->pdev->dev, in i40e_reserve_msix_vectors()
11777 * @pf: board private structure
11783 static int i40e_init_msix(struct i40e_pf *pf) in i40e_init_msix() argument
11785 struct i40e_hw *hw = &pf->hw; in i40e_init_msix()
11792 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_init_msix()
11819 /* reserve some vectors for the main PF traffic queues. Initially we in i40e_init_msix()
11827 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2); in i40e_init_msix()
11828 vectors_left -= pf->num_lan_msix; in i40e_init_msix()
11831 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_init_msix()
11833 pf->num_fdsb_msix = 1; in i40e_init_msix()
11837 pf->num_fdsb_msix = 0; in i40e_init_msix()
11842 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_init_msix()
11843 iwarp_requested = pf->num_iwarp_msix; in i40e_init_msix()
11846 pf->num_iwarp_msix = 0; in i40e_init_msix()
11847 else if (vectors_left < pf->num_iwarp_msix) in i40e_init_msix()
11848 pf->num_iwarp_msix = 1; in i40e_init_msix()
11849 v_budget += pf->num_iwarp_msix; in i40e_init_msix()
11850 vectors_left -= pf->num_iwarp_msix; in i40e_init_msix()
11854 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { in i40e_init_msix()
11856 pf->num_vmdq_msix = 0; in i40e_init_msix()
11857 pf->num_vmdq_qps = 0; in i40e_init_msix()
11860 pf->num_vmdq_vsis * pf->num_vmdq_qps; in i40e_init_msix()
11867 * queues/vectors used by the PF later with the ethtool in i40e_init_msix()
11871 pf->num_vmdq_qps = 1; in i40e_init_msix()
11872 vmdq_vecs_wanted = pf->num_vmdq_vsis; in i40e_init_msix()
11877 pf->num_vmdq_msix = pf->num_vmdq_qps; in i40e_init_msix()
11893 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left); in i40e_init_msix()
11894 pf->num_lan_msix += extra_vectors; in i40e_init_msix()
11900 v_budget += pf->num_lan_msix; in i40e_init_msix()
11901 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), in i40e_init_msix()
11903 if (!pf->msix_entries) in i40e_init_msix()
11907 pf->msix_entries[i].entry = i; in i40e_init_msix()
11908 v_actual = i40e_reserve_msix_vectors(pf, v_budget); in i40e_init_msix()
11911 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; in i40e_init_msix()
11912 kfree(pf->msix_entries); in i40e_init_msix()
11913 pf->msix_entries = NULL; in i40e_init_msix()
11914 pci_disable_msix(pf->pdev); in i40e_init_msix()
11919 pf->num_vmdq_vsis = 0; in i40e_init_msix()
11920 pf->num_vmdq_qps = 0; in i40e_init_msix()
11921 pf->num_lan_qps = 1; in i40e_init_msix()
11922 pf->num_lan_msix = 1; in i40e_init_msix()
11932 dev_info(&pf->pdev->dev, in i40e_init_msix()
11939 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ in i40e_init_msix()
11940 pf->num_vmdq_vsis = 1; in i40e_init_msix()
11941 pf->num_vmdq_qps = 1; in i40e_init_msix()
11946 pf->num_lan_msix = 1; in i40e_init_msix()
11949 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_init_msix()
11950 pf->num_lan_msix = 1; in i40e_init_msix()
11951 pf->num_iwarp_msix = 1; in i40e_init_msix()
11953 pf->num_lan_msix = 2; in i40e_init_msix()
11957 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_init_msix()
11958 pf->num_iwarp_msix = min_t(int, (vec / 3), in i40e_init_msix()
11960 pf->num_vmdq_vsis = min_t(int, (vec / 3), in i40e_init_msix()
11963 pf->num_vmdq_vsis = min_t(int, (vec / 2), in i40e_init_msix()
11966 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_init_msix()
11967 pf->num_fdsb_msix = 1; in i40e_init_msix()
11970 pf->num_lan_msix = min_t(int, in i40e_init_msix()
11971 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), in i40e_init_msix()
11972 pf->num_lan_msix); in i40e_init_msix()
11973 pf->num_lan_qps = pf->num_lan_msix; in i40e_init_msix()
11978 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && in i40e_init_msix()
11979 (pf->num_fdsb_msix == 0)) { in i40e_init_msix()
11980 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11981 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_init_msix()
11982 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_init_msix()
11984 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && in i40e_init_msix()
11985 (pf->num_vmdq_msix == 0)) { in i40e_init_msix()
11986 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11987 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; in i40e_init_msix()
11990 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && in i40e_init_msix()
11991 (pf->num_iwarp_msix == 0)) { in i40e_init_msix()
11992 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11993 pf->flags &= ~I40E_FLAG_IWARP_ENABLED; in i40e_init_msix()
11995 i40e_debug(&pf->hw, I40E_DEBUG_INIT, in i40e_init_msix()
11996 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", in i40e_init_msix()
11997 pf->num_lan_msix, in i40e_init_msix()
11998 pf->num_vmdq_msix * pf->num_vmdq_vsis, in i40e_init_msix()
11999 pf->num_fdsb_msix, in i40e_init_msix()
12000 pf->num_iwarp_msix); in i40e_init_msix()
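
i40e_init_msix() budgets the available MSI-X vectors in passes: one for the misc/admin interrupt, up to one per CPU (but at most half of what remains) for LAN queues, one for sideband Flow Director, shares for iWARP and VMDq, and finally any leftovers handed back to LAN up to the CPU count. The sketch below reproduces only the arithmetic skeleton of that split with a reduced set of consumers; the values are illustrative, not the driver's full policy.

    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    struct demo_budget {
        unsigned int misc, lan, fdsb, leftover;
    };

    /* Split 'total' MSI-X vectors between a misc interrupt, LAN queues and one
     * sideband Flow Director vector, then hand any leftovers back to LAN. */
    static struct demo_budget demo_split_vectors(unsigned int total, unsigned int cpus)
    {
        struct demo_budget b = { 0, 0, 0, 0 };
        unsigned int left = total;
        unsigned int extra;

        if (left) {
            b.misc = 1;                    /* misc/admin queue vector */
            left--;
        }
        b.lan = min_u(cpus, left / 2);     /* half of the rest, capped at CPUs */
        left -= b.lan;
        if (left) {
            b.fdsb = 1;                    /* sideband Flow Director vector */
            left--;
        }
        extra = min_u(cpus - b.lan, left); /* leftovers go back to LAN, up to CPUs */
        b.lan += extra;
        left -= extra;
        b.leftover = left;                 /* what VMDq/iWARP would share */
        return b;
    }

    int main(void)
    {
        struct demo_budget b = demo_split_vectors(16, 8);

        printf("misc %u, lan %u, fdsb %u, leftover %u\n",
               b.misc, b.lan, b.fdsb, b.leftover);
        return 0;
    }
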
12043 struct i40e_pf *pf = vsi->back; in i40e_vsi_alloc_q_vectors() local
12047 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_alloc_q_vectors()
12049 else if (vsi == pf->vsi[pf->lan_vsi]) in i40e_vsi_alloc_q_vectors()
12071 * @pf: board private structure to initialize
12073 static int i40e_init_interrupt_scheme(struct i40e_pf *pf) in i40e_init_interrupt_scheme() argument
12078 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_init_interrupt_scheme()
12079 vectors = i40e_init_msix(pf); in i40e_init_interrupt_scheme()
12081 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | in i40e_init_interrupt_scheme()
12090 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_init_interrupt_scheme()
12093 i40e_determine_queue_usage(pf); in i40e_init_interrupt_scheme()
12097 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && in i40e_init_interrupt_scheme()
12098 (pf->flags & I40E_FLAG_MSI_ENABLED)) { in i40e_init_interrupt_scheme()
12099 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); in i40e_init_interrupt_scheme()
12100 vectors = pci_enable_msi(pf->pdev); in i40e_init_interrupt_scheme()
12102 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", in i40e_init_interrupt_scheme()
12104 pf->flags &= ~I40E_FLAG_MSI_ENABLED; in i40e_init_interrupt_scheme()
12109 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) in i40e_init_interrupt_scheme()
12110 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); in i40e_init_interrupt_scheme()
12114 pf->irq_pile = kzalloc(size, GFP_KERNEL); in i40e_init_interrupt_scheme()
12115 if (!pf->irq_pile) in i40e_init_interrupt_scheme()
12118 pf->irq_pile->num_entries = vectors; in i40e_init_interrupt_scheme()
12121 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); in i40e_init_interrupt_scheme()
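
i40e_init_interrupt_scheme() degrades gracefully: MSI-X first, then MSI, then legacy INTx, trimming feature flags and re-running queue sizing at each step down. A sketch of that fallback chain, with stub probes standing in for i40e_init_msix() and pci_enable_msi():

    #include <stdbool.h>
    #include <stdio.h>

    enum demo_irq_mode { DEMO_IRQ_MSIX, DEMO_IRQ_MSI, DEMO_IRQ_LEGACY };

    /* Stubs standing in for the real probes; flip these to see each fallback. */
    static bool demo_try_msix(void) { return false; }
    static bool demo_try_msi(void)  { return true; }

    static enum demo_irq_mode demo_pick_irq_mode(void)
    {
        if (demo_try_msix())
            return DEMO_IRQ_MSIX;

        puts("MSI-X not available, trying MSI");   /* also: drop per-queue features */
        if (demo_try_msi())
            return DEMO_IRQ_MSI;

        puts("MSI not available, falling back to legacy IRQ");
        return DEMO_IRQ_LEGACY;
    }

    int main(void)
    {
        printf("selected mode %d\n", demo_pick_irq_mode());
        return 0;
    }
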
12128 * @pf: private board data structure
12134 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) in i40e_restore_interrupt_scheme() argument
12142 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); in i40e_restore_interrupt_scheme()
12144 err = i40e_init_interrupt_scheme(pf); in i40e_restore_interrupt_scheme()
12151 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_restore_interrupt_scheme()
12152 if (pf->vsi[i]) { in i40e_restore_interrupt_scheme()
12153 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
12156 i40e_vsi_map_rings_to_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
12160 err = i40e_setup_misc_vector(pf); in i40e_restore_interrupt_scheme()
12164 if (pf->flags & I40E_FLAG_IWARP_ENABLED) in i40e_restore_interrupt_scheme()
12165 i40e_client_update_msix_info(pf); in i40e_restore_interrupt_scheme()
12171 if (pf->vsi[i]) in i40e_restore_interrupt_scheme()
12172 i40e_vsi_free_q_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
12181 * @pf: board private structure
12188 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf) in i40e_setup_misc_vector_for_recovery_mode() argument
12192 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_setup_misc_vector_for_recovery_mode()
12193 err = i40e_setup_misc_vector(pf); in i40e_setup_misc_vector_for_recovery_mode()
12196 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector_for_recovery_mode()
12202 u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED; in i40e_setup_misc_vector_for_recovery_mode()
12204 err = request_irq(pf->pdev->irq, i40e_intr, flags, in i40e_setup_misc_vector_for_recovery_mode()
12205 pf->int_name, pf); in i40e_setup_misc_vector_for_recovery_mode()
12208 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector_for_recovery_mode()
12213 i40e_enable_misc_int_causes(pf); in i40e_setup_misc_vector_for_recovery_mode()
12214 i40e_irq_dynamic_enable_icr0(pf); in i40e_setup_misc_vector_for_recovery_mode()
12222 * @pf: board private structure
12228 static int i40e_setup_misc_vector(struct i40e_pf *pf) in i40e_setup_misc_vector() argument
12230 struct i40e_hw *hw = &pf->hw; in i40e_setup_misc_vector()
12234 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) { in i40e_setup_misc_vector()
12235 err = request_irq(pf->msix_entries[0].vector, in i40e_setup_misc_vector()
12236 i40e_intr, 0, pf->int_name, pf); in i40e_setup_misc_vector()
12238 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); in i40e_setup_misc_vector()
12239 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector()
12241 pf->int_name, err); in i40e_setup_misc_vector()
12246 i40e_enable_misc_int_causes(pf); in i40e_setup_misc_vector()
12254 i40e_irq_dynamic_enable_icr0(pf); in i40e_setup_misc_vector()
12271 struct i40e_pf *pf = vsi->back; in i40e_get_rss_aq() local
12272 struct i40e_hw *hw = &pf->hw; in i40e_get_rss_aq()
12279 dev_info(&pf->pdev->dev, in i40e_get_rss_aq()
12282 i40e_aq_str(&pf->hw, in i40e_get_rss_aq()
12283 pf->hw.aq.asq_last_status)); in i40e_get_rss_aq()
12293 dev_info(&pf->pdev->dev, in i40e_get_rss_aq()
12296 i40e_aq_str(&pf->hw, in i40e_get_rss_aq()
12297 pf->hw.aq.asq_last_status)); in i40e_get_rss_aq()
12317 struct i40e_pf *pf = vsi->back; in i40e_config_rss_reg() local
12318 struct i40e_hw *hw = &pf->hw; in i40e_config_rss_reg()
12333 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); in i40e_config_rss_reg()
12351 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); in i40e_config_rss_reg()
12371 struct i40e_pf *pf = vsi->back; in i40e_get_rss_reg() local
12372 struct i40e_hw *hw = &pf->hw; in i40e_get_rss_reg()
12404 struct i40e_pf *pf = vsi->back; in i40e_config_rss() local
12406 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) in i40e_config_rss()
12423 struct i40e_pf *pf = vsi->back; in i40e_get_rss() local
12425 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) in i40e_get_rss()
12433 * @pf: Pointer to board private structure
12438 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, in i40e_fill_rss_lut() argument
12449 * @pf: board private structure
12451 static int i40e_pf_config_rss(struct i40e_pf *pf) in i40e_pf_config_rss() argument
12453 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_pf_config_rss()
12456 struct i40e_hw *hw = &pf->hw; in i40e_pf_config_rss()
12464 hena |= i40e_pf_get_default_rss_hena(pf); in i40e_pf_config_rss()
12471 reg_val = (pf->rss_table_size == 512) ? in i40e_pf_config_rss()
12486 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_pf_config_rss()
12499 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); in i40e_pf_config_rss()
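
i40e_pf_config_rss() clamps the VSI's RSS span to min(alloc_rss_size, qcount) and then has i40e_fill_rss_lut() populate the lookup table. The helper's body is not part of this listing; the sketch below is the obvious round-robin population consistent with how it is called here, offered as an illustration rather than a verified copy of the driver function.

    #include <stdint.h>
    #include <stdio.h>

    /* Spread table entries round-robin across the active queues, e.g. a
     * 16-entry table over 4 queues becomes 0 1 2 3 0 1 2 3 ... */
    static void demo_fill_rss_lut(uint8_t *lut, uint16_t table_size, uint16_t rss_size)
    {
        uint16_t i;

        for (i = 0; i < table_size; i++)
            lut[i] = i % rss_size;
    }

    int main(void)
    {
        uint8_t lut[16];
        uint16_t i;

        demo_fill_rss_lut(lut, 16, 4);
        for (i = 0; i < 16; i++)
            printf("%u ", (unsigned int)lut[i]);
        printf("\n");
        return 0;
    }
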
12516 * @pf: board private structure
12523 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) in i40e_reconfig_rss_queues() argument
12525 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_reconfig_rss_queues()
12528 if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) in i40e_reconfig_rss_queues()
12532 new_rss_size = min_t(int, queue_count, pf->rss_size_max); in i40e_reconfig_rss_queues()
12538 i40e_prep_for_reset(pf); in i40e_reconfig_rss_queues()
12539 if (test_bit(__I40E_IN_REMOVE, pf->state)) in i40e_reconfig_rss_queues()
12540 return pf->alloc_rss_size; in i40e_reconfig_rss_queues()
12542 pf->alloc_rss_size = new_rss_size; in i40e_reconfig_rss_queues()
12544 i40e_reset_and_rebuild(pf, true, true); in i40e_reconfig_rss_queues()
12551 dev_dbg(&pf->pdev->dev, in i40e_reconfig_rss_queues()
12557 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_reconfig_rss_queues()
12559 i40e_pf_config_rss(pf); in i40e_reconfig_rss_queues()
12561 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n", in i40e_reconfig_rss_queues()
12562 vsi->req_queue_pairs, pf->rss_size_max); in i40e_reconfig_rss_queues()
12563 return pf->alloc_rss_size; in i40e_reconfig_rss_queues()
12567 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
12568 * @pf: board private structure
12570 int i40e_get_partition_bw_setting(struct i40e_pf *pf) in i40e_get_partition_bw_setting() argument
12576 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, in i40e_get_partition_bw_setting()
12581 pf->min_bw = min_bw; in i40e_get_partition_bw_setting()
12583 pf->max_bw = max_bw; in i40e_get_partition_bw_setting()
12590 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
12591 * @pf: board private structure
12593 int i40e_set_partition_bw_setting(struct i40e_pf *pf) in i40e_set_partition_bw_setting() argument
12600 /* Set the valid bit for this PF */ in i40e_set_partition_bw_setting()
12601 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); in i40e_set_partition_bw_setting()
12602 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK; in i40e_set_partition_bw_setting()
12603 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK; in i40e_set_partition_bw_setting()
12606 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); in i40e_set_partition_bw_setting()
12612 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
12613 * @pf: board private structure
12615 int i40e_commit_partition_bw_setting(struct i40e_pf *pf) in i40e_commit_partition_bw_setting() argument
12622 if (pf->hw.partition_id != 1) { in i40e_commit_partition_bw_setting()
12623 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12625 pf->hw.partition_id); in i40e_commit_partition_bw_setting()
12631 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); in i40e_commit_partition_bw_setting()
12632 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12634 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12637 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
12642 ret = i40e_aq_read_nvm(&pf->hw, in i40e_commit_partition_bw_setting()
12649 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12650 i40e_release_nvm(&pf->hw); in i40e_commit_partition_bw_setting()
12652 dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n", in i40e_commit_partition_bw_setting()
12654 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
12662 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); in i40e_commit_partition_bw_setting()
12663 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12665 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12668 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
12675 ret = i40e_aq_update_nvm(&pf->hw, in i40e_commit_partition_bw_setting()
12682 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12683 i40e_release_nvm(&pf->hw); in i40e_commit_partition_bw_setting()
12685 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12688 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
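
The i40e_commit_partition_bw_setting() fragments above follow an acquire/read/release and then acquire/update/release sequence around the NVM, releasing the resource on both the error and success paths. The standalone sketch below models only that locking discipline; the nvm_*() helpers are local stand-ins, not i40e or kernel APIs:

    #include <stdio.h>

    static int  nvm_acquire(const char *mode) { printf("acquire NVM (%s)\n", mode); return 0; }
    static void nvm_release(void)             { printf("release NVM\n"); }
    static int  nvm_read(unsigned short *w)   { *w = 0x1234; return 0; }
    static int  nvm_update(unsigned short w)  { printf("write 0x%04x\n", w); return 0; }

    int main(void)
    {
            unsigned short word;

            if (nvm_acquire("read"))
                    return 1;
            if (nvm_read(&word)) {          /* read the current setting */
                    nvm_release();
                    return 1;
            }
            nvm_release();                  /* drop the lock before re-acquiring */

            if (nvm_acquire("write"))
                    return 1;
            nvm_update(word);               /* commit the value back */
            nvm_release();
            return 0;
    }

Dropping the lock between the read and the write mirrors the i40e_release_nvm() calls visible after each phase above.
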
12696 * if total port shutdown feature is enabled for this PF
12697 * @pf: board private structure
12699 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf) in i40e_is_total_port_shutdown_enabled() argument
12714 read_status = i40e_read_nvm_word(&pf->hw, in i40e_is_total_port_shutdown_enabled()
12719 read_status = i40e_read_nvm_word(&pf->hw, in i40e_is_total_port_shutdown_enabled()
12726 read_status = i40e_read_nvm_module_data(&pf->hw, in i40e_is_total_port_shutdown_enabled()
12734 link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH); in i40e_is_total_port_shutdown_enabled()
12740 dev_warn(&pf->pdev->dev, in i40e_is_total_port_shutdown_enabled()
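
i40e_is_total_port_shutdown_enabled() above shifts link_behavior right by pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH to pick this port's field out of a shared NVM word. A generic sketch of that per-port bit-field extraction; the 4-bit field width and the packed value are invented for the example:

    #include <stdint.h>
    #include <stdio.h>

    #define PORT_BIT_LENGTH 4               /* assumed field width per port */

    static uint32_t port_field(uint32_t word, unsigned int port)
    {
            uint32_t mask = (1u << PORT_BIT_LENGTH) - 1;

            return (word >> (port * PORT_BIT_LENGTH)) & mask;
    }

    int main(void)
    {
            uint32_t link_behavior = 0x00003210; /* made-up packed per-port fields */

            for (unsigned int port = 0; port < 4; port++)
                    printf("port %u -> 0x%x\n", port,
                           (unsigned int)port_field(link_behavior, port));
            return 0;
    }
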
12748 * @pf: board private structure to initialize
12754 static int i40e_sw_init(struct i40e_pf *pf) in i40e_sw_init() argument
12761 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | in i40e_sw_init()
12766 pf->rx_itr_default = I40E_ITR_RX_DEF; in i40e_sw_init()
12767 pf->tx_itr_default = I40E_ITR_TX_DEF; in i40e_sw_init()
12769 /* Depending on PF configurations, it is possible that the RSS in i40e_sw_init()
12772 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); in i40e_sw_init()
12773 pf->alloc_rss_size = 1; in i40e_sw_init()
12774 pf->rss_table_size = pf->hw.func_caps.rss_table_size; in i40e_sw_init()
12775 pf->rss_size_max = min_t(int, pf->rss_size_max, in i40e_sw_init()
12776 pf->hw.func_caps.num_tx_qp); in i40e_sw_init()
12780 pf->rss_size_max = min_t(int, pf->rss_size_max, pow); in i40e_sw_init()
12782 if (pf->hw.func_caps.rss) { in i40e_sw_init()
12783 pf->flags |= I40E_FLAG_RSS_ENABLED; in i40e_sw_init()
12784 pf->alloc_rss_size = min_t(int, pf->rss_size_max, in i40e_sw_init()
12789 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { in i40e_sw_init()
12790 pf->flags |= I40E_FLAG_MFP_ENABLED; in i40e_sw_init()
12791 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); in i40e_sw_init()
12792 if (i40e_get_partition_bw_setting(pf)) { in i40e_sw_init()
12793 dev_warn(&pf->pdev->dev, in i40e_sw_init()
12796 dev_info(&pf->pdev->dev, in i40e_sw_init()
12798 pf->min_bw, pf->max_bw); in i40e_sw_init()
12801 i40e_set_partition_bw_setting(pf); in i40e_sw_init()
12805 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || in i40e_sw_init()
12806 (pf->hw.func_caps.fd_filters_best_effort > 0)) { in i40e_sw_init()
12807 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; in i40e_sw_init()
12808 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; in i40e_sw_init()
12809 if (pf->flags & I40E_FLAG_MFP_ENABLED && in i40e_sw_init()
12810 pf->hw.num_partitions > 1) in i40e_sw_init()
12811 dev_info(&pf->pdev->dev, in i40e_sw_init()
12814 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_sw_init()
12815 pf->fdir_pf_filter_count = in i40e_sw_init()
12816 pf->hw.func_caps.fd_filters_guaranteed; in i40e_sw_init()
12817 pf->hw.fdir_shared_filter_count = in i40e_sw_init()
12818 pf->hw.func_caps.fd_filters_best_effort; in i40e_sw_init()
12821 if (pf->hw.mac.type == I40E_MAC_X722) { in i40e_sw_init()
12822 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE | in i40e_sw_init()
12835 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) != in i40e_sw_init()
12837 dev_warn(&pf->pdev->dev, in i40e_sw_init()
12839 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE; in i40e_sw_init()
12841 } else if ((pf->hw.aq.api_maj_ver > 1) || in i40e_sw_init()
12842 ((pf->hw.aq.api_maj_ver == 1) && in i40e_sw_init()
12843 (pf->hw.aq.api_min_ver > 4))) { in i40e_sw_init()
12845 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE; in i40e_sw_init()
12849 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE) in i40e_sw_init()
12850 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; in i40e_sw_init()
12852 if ((pf->hw.mac.type == I40E_MAC_XL710) && in i40e_sw_init()
12853 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || in i40e_sw_init()
12854 (pf->hw.aq.fw_maj_ver < 4))) { in i40e_sw_init()
12855 pf->hw_features |= I40E_HW_RESTART_AUTONEG; in i40e_sw_init()
12857 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT; in i40e_sw_init()
12861 if ((pf->hw.mac.type == I40E_MAC_XL710) && in i40e_sw_init()
12862 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || in i40e_sw_init()
12863 (pf->hw.aq.fw_maj_ver < 4))) in i40e_sw_init()
12864 pf->hw_features |= I40E_HW_STOP_FW_LLDP; in i40e_sw_init()
12867 if ((pf->hw.mac.type == I40E_MAC_XL710) && in i40e_sw_init()
12868 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || in i40e_sw_init()
12869 (pf->hw.aq.fw_maj_ver >= 5))) in i40e_sw_init()
12870 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB; in i40e_sw_init()
12873 if (pf->hw.mac.type == I40E_MAC_XL710 && in i40e_sw_init()
12874 pf->hw.aq.fw_maj_ver >= 6) in i40e_sw_init()
12875 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE; in i40e_sw_init()
12877 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) { in i40e_sw_init()
12878 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; in i40e_sw_init()
12879 pf->flags |= I40E_FLAG_VMDQ_ENABLED; in i40e_sw_init()
12880 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); in i40e_sw_init()
12883 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) { in i40e_sw_init()
12884 pf->flags |= I40E_FLAG_IWARP_ENABLED; in i40e_sw_init()
12886 pf->num_iwarp_msix = (int)num_online_cpus() + 1; in i40e_sw_init()
12893 if (pf->hw.mac.type == I40E_MAC_XL710 && in i40e_sw_init()
12894 pf->hw.func_caps.npar_enable && in i40e_sw_init()
12895 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) in i40e_sw_init()
12896 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE; in i40e_sw_init()
12899 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { in i40e_sw_init()
12900 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; in i40e_sw_init()
12901 pf->flags |= I40E_FLAG_SRIOV_ENABLED; in i40e_sw_init()
12902 pf->num_req_vfs = min_t(int, in i40e_sw_init()
12903 pf->hw.func_caps.num_vfs, in i40e_sw_init()
12907 pf->eeprom_version = 0xDEAD; in i40e_sw_init()
12908 pf->lan_veb = I40E_NO_VEB; in i40e_sw_init()
12909 pf->lan_vsi = I40E_NO_VSI; in i40e_sw_init()
12912 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; in i40e_sw_init()
12916 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); in i40e_sw_init()
12917 pf->qp_pile = kzalloc(size, GFP_KERNEL); in i40e_sw_init()
12918 if (!pf->qp_pile) { in i40e_sw_init()
12922 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; in i40e_sw_init()
12924 pf->tx_timeout_recovery_level = 1; in i40e_sw_init()
12926 if (pf->hw.mac.type != I40E_MAC_X722 && in i40e_sw_init()
12927 i40e_is_total_port_shutdown_enabled(pf)) { in i40e_sw_init()
12931 pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED | in i40e_sw_init()
12933 dev_info(&pf->pdev->dev, in i40e_sw_init()
12936 mutex_init(&pf->switch_mutex); in i40e_sw_init()
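
Early in i40e_sw_init() the RSS maximum is derived from the hardware table width (BIT(rss_table_entry_width)) and then clamped by the Tx queue capability and by a power of two related to the online CPU count. The exact rounding is not visible in this listing, so the standalone sketch below rounds up as one plausible choice and uses invented capability numbers:

    #include <stdio.h>

    static unsigned int roundup_pow_of_two(unsigned int n)
    {
            unsigned int p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            unsigned int rss_table_entry_width = 9; /* assumed: 2^9 = 512 entries */
            unsigned int num_tx_qp = 12;            /* assumed queue capability */
            unsigned int online_cpus = 6;           /* assumed CPU count */

            unsigned int rss_size_max = 1u << rss_table_entry_width;

            if (rss_size_max > num_tx_qp)
                    rss_size_max = num_tx_qp;
            if (rss_size_max > roundup_pow_of_two(online_cpus))
                    rss_size_max = roundup_pow_of_two(online_cpus);

            printf("rss_size_max = %u\n", rss_size_max); /* 8 with these inputs */
            return 0;
    }
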
12944 * @pf: board private structure to initialize
12949 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) in i40e_set_ntuple() argument
12958 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) in i40e_set_ntuple()
12963 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) { in i40e_set_ntuple()
12964 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_set_ntuple()
12965 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; in i40e_set_ntuple()
12969 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_set_ntuple()
12971 i40e_fdir_filter_exit(pf); in i40e_set_ntuple()
12973 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_set_ntuple()
12974 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state); in i40e_set_ntuple()
12975 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_set_ntuple()
12978 pf->fd_add_err = 0; in i40e_set_ntuple()
12979 pf->fd_atr_cnt = 0; in i40e_set_ntuple()
12981 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_set_ntuple()
12982 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && in i40e_set_ntuple()
12983 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_set_ntuple()
12984 dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); in i40e_set_ntuple()
12995 struct i40e_pf *pf = vsi->back; in i40e_clear_rss_lut() local
12996 struct i40e_hw *hw = &pf->hw; in i40e_clear_rss_lut()
13007 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); in i40e_clear_rss_lut()
13012 * i40e_set_loopback - turn on/off loopback mode on underlying PF
13045 struct i40e_pf *pf = vsi->back; in i40e_set_features() local
13049 i40e_pf_config_rss(pf); in i40e_set_features()
13060 (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) { in i40e_set_features()
13061 dev_err(&pf->pdev->dev, in i40e_set_features()
13069 need_reset = i40e_set_ntuple(pf, features); in i40e_set_features()
13072 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); in i40e_set_features()
13128 struct i40e_pf *pf = np->vsi->back; in i40e_get_phys_port_id() local
13129 struct i40e_hw *hw = &pf->hw; in i40e_get_phys_port_id()
13131 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID)) in i40e_get_phys_port_id()
13157 struct i40e_pf *pf = np->vsi->back; in i40e_ndo_fdb_add() local
13160 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) in i40e_ndo_fdb_add()
13200 * is to change the mode then that requires a PF reset to
13213 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_setlink() local
13218 /* Only for PF VSI for now */ in i40e_ndo_bridge_setlink()
13219 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) in i40e_ndo_bridge_setlink()
13222 /* Find the HW bridge for PF VSI */ in i40e_ndo_bridge_setlink()
13224 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_ndo_bridge_setlink()
13225 veb = pf->veb[i]; in i40e_ndo_bridge_setlink()
13245 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_ndo_bridge_setlink()
13260 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; in i40e_ndo_bridge_setlink()
13262 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; in i40e_ndo_bridge_setlink()
13263 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); in i40e_ndo_bridge_setlink()
13290 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_getlink() local
13294 /* Only for PF VSI for now */ in i40e_ndo_bridge_getlink()
13295 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) in i40e_ndo_bridge_getlink()
13298 /* Find the HW bridge for the PF VSI */ in i40e_ndo_bridge_getlink()
13300 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_ndo_bridge_getlink()
13301 veb = pf->veb[i]; in i40e_ndo_bridge_getlink()
13379 struct i40e_pf *pf = vsi->back; in i40e_xdp_setup() local
13385 if (prog && test_bit(__I40E_IN_REMOVE, pf->state)) in i40e_xdp_setup()
13397 i40e_prep_for_reset(pf); in i40e_xdp_setup()
13407 i40e_reset_and_rebuild(pf, true, true); in i40e_xdp_setup()
13446 struct i40e_pf *pf = vsi->back; in i40e_enter_busy_conf() local
13449 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { in i40e_enter_busy_conf()
13465 struct i40e_pf *pf = vsi->back; in i40e_exit_busy_conf() local
13467 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_exit_busy_conf()
13540 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_toggle_rings() local
13544 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, in i40e_queue_pair_toggle_rings()
13547 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
13553 i40e_control_rx_q(pf, pf_q, enable); in i40e_queue_pair_toggle_rings()
13554 ret = i40e_pf_rxq_wait(pf, pf_q, enable); in i40e_queue_pair_toggle_rings()
13556 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
13571 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_queue_pair_toggle_rings()
13575 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
13591 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_enable_irq() local
13592 struct i40e_hw *hw = &pf->hw; in i40e_queue_pair_enable_irq()
13595 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_queue_pair_enable_irq()
13598 i40e_irq_dynamic_enable_icr0(pf); in i40e_queue_pair_enable_irq()
13611 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_disable_irq() local
13612 struct i40e_hw *hw = &pf->hw; in i40e_queue_pair_disable_irq()
13620 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_queue_pair_disable_irq()
13625 synchronize_irq(pf->msix_entries[intpf].vector); in i40e_queue_pair_disable_irq()
13631 synchronize_irq(pf->pdev->irq); in i40e_queue_pair_disable_irq()
13766 struct i40e_pf *pf = vsi->back; in i40e_config_netdev() local
13767 struct i40e_hw *hw = &pf->hw; in i40e_config_netdev()
13805 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE)) in i40e_config_netdev()
13808 netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic; in i40e_config_netdev()
13841 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) in i40e_config_netdev()
13852 SET_NETDEV_DEV(netdev, &pf->pdev->dev); in i40e_config_netdev()
13882 pf->vsi[pf->lan_vsi]->netdev->name); in i40e_config_netdev()
13954 struct i40e_pf *pf = vsi->back; in i40e_is_vsi_uplink_mode_veb() local
13960 veb = pf->veb[vsi->veb_idx]; in i40e_is_vsi_uplink_mode_veb()
13962 dev_info(&pf->pdev->dev, in i40e_is_vsi_uplink_mode_veb()
13989 struct i40e_pf *pf = vsi->back; in i40e_add_vsi() local
13990 struct i40e_hw *hw = &pf->hw; in i40e_add_vsi()
14002 /* The PF's main VSI is already setup as part of the in i40e_add_vsi()
14007 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
14008 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
14010 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_add_vsi()
14013 dev_info(&pf->pdev->dev, in i40e_add_vsi()
14014 "couldn't get PF vsi config, err %pe aq_err %s\n", in i40e_add_vsi()
14016 i40e_aq_str(&pf->hw, in i40e_add_vsi()
14017 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
14026 enabled_tc = i40e_pf_get_tc_map(pf); in i40e_add_vsi()
14032 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) { in i40e_add_vsi()
14034 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
14035 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
14043 dev_info(&pf->pdev->dev, in i40e_add_vsi()
14046 i40e_aq_str(&pf->hw, in i40e_add_vsi()
14047 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
14054 if ((pf->flags & I40E_FLAG_MFP_ENABLED) && in i40e_add_vsi()
14055 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ in i40e_add_vsi()
14057 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
14058 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
14063 dev_info(&pf->pdev->dev, in i40e_add_vsi()
14066 i40e_aq_str(&pf->hw, in i40e_add_vsi()
14067 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
14078 * For MFP case the iSCSI PF would use this in i40e_add_vsi()
14086 dev_info(&pf->pdev->dev, in i40e_add_vsi()
14090 i40e_aq_str(&pf->hw, in i40e_add_vsi()
14091 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
14102 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && in i40e_add_vsi()
14160 if (pf->vf[vsi->vf_id].spoofchk) { in i40e_add_vsi()
14185 i40e_aq_str(&pf->hw, in i40e_add_vsi()
14186 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
14208 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); in i40e_add_vsi()
14214 dev_info(&pf->pdev->dev, in i40e_add_vsi()
14217 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_vsi()
14237 struct i40e_pf *pf; in i40e_vsi_release() local
14241 pf = vsi->back; in i40e_vsi_release()
14245 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", in i40e_vsi_release()
14249 if (vsi == pf->vsi[pf->lan_vsi] && in i40e_vsi_release()
14250 !test_bit(__I40E_DOWN, pf->state)) { in i40e_vsi_release()
14251 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); in i40e_vsi_release()
14302 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { in i40e_vsi_release()
14303 if (pf->vsi[i] && in i40e_vsi_release()
14304 pf->vsi[i]->uplink_seid == uplink_seid && in i40e_vsi_release()
14305 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_vsi_release()
14310 if (!pf->veb[i]) in i40e_vsi_release()
14312 if (pf->veb[i]->uplink_seid == uplink_seid) in i40e_vsi_release()
14314 if (pf->veb[i]->seid == uplink_seid) in i40e_vsi_release()
14315 veb = pf->veb[i]; in i40e_vsi_release()
14336 struct i40e_pf *pf = vsi->back; in i40e_vsi_setup_vectors() local
14339 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", in i40e_vsi_setup_vectors()
14345 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", in i40e_vsi_setup_vectors()
14352 dev_info(&pf->pdev->dev, in i40e_vsi_setup_vectors()
14362 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_vsi_setup_vectors()
14365 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, in i40e_vsi_setup_vectors()
14368 dev_info(&pf->pdev->dev, in i40e_vsi_setup_vectors()
14392 struct i40e_pf *pf; in i40e_vsi_reinit_setup() local
14399 pf = vsi->back; in i40e_vsi_reinit_setup()
14401 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); in i40e_vsi_reinit_setup()
14413 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_reinit_setup()
14415 dev_info(&pf->pdev->dev, in i40e_vsi_reinit_setup()
14425 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; in i40e_vsi_reinit_setup()
14426 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_vsi_reinit_setup()
14427 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; in i40e_vsi_reinit_setup()
14428 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); in i40e_vsi_reinit_setup()
14430 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); in i40e_vsi_reinit_setup()
14449 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_reinit_setup()
14457 * @pf: board private structure
14468 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, in i40e_vsi_setup() argument
14478 * - the PF's port seid in i40e_vsi_setup()
14479 * no VEB is needed because this is the PF in i40e_vsi_setup()
14485 * - seid of the PF VSI, which is what creates the first VEB in i40e_vsi_setup()
14491 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { in i40e_vsi_setup()
14492 veb = pf->veb[i]; in i40e_vsi_setup()
14497 if (!veb && uplink_seid != pf->mac_seid) { in i40e_vsi_setup()
14499 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_vsi_setup()
14500 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { in i40e_vsi_setup()
14501 vsi = pf->vsi[i]; in i40e_vsi_setup()
14506 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", in i40e_vsi_setup()
14511 if (vsi->uplink_seid == pf->mac_seid) in i40e_vsi_setup()
14512 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, in i40e_vsi_setup()
14515 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_vsi_setup()
14518 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { in i40e_vsi_setup()
14527 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { in i40e_vsi_setup()
14529 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; in i40e_vsi_setup()
14534 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_vsi_setup()
14535 veb = pf->veb[i]; in i40e_vsi_setup()
14538 dev_info(&pf->pdev->dev, "couldn't add VEB\n"); in i40e_vsi_setup()
14547 v_idx = i40e_vsi_mem_alloc(pf, type); in i40e_vsi_setup()
14550 vsi = pf->vsi[v_idx]; in i40e_vsi_setup()
14557 pf->lan_vsi = v_idx; in i40e_vsi_setup()
14564 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_setup()
14566 dev_info(&pf->pdev->dev, in i40e_vsi_setup()
14619 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && in i40e_vsi_setup()
14635 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_setup()
14652 struct i40e_pf *pf = veb->pf; in i40e_veb_get_bw_info() local
14653 struct i40e_hw *hw = &pf->hw; in i40e_veb_get_bw_info()
14661 dev_info(&pf->pdev->dev, in i40e_veb_get_bw_info()
14664 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); in i40e_veb_get_bw_info()
14671 dev_info(&pf->pdev->dev, in i40e_veb_get_bw_info()
14674 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); in i40e_veb_get_bw_info()
14696 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
14697 * @pf: board private structure
14700 * On success: returns vsi index in PF (positive)
14702 static int i40e_veb_mem_alloc(struct i40e_pf *pf) in i40e_veb_mem_alloc() argument
14708 /* Need to protect the allocation of switch elements at the PF level */ in i40e_veb_mem_alloc()
14709 mutex_lock(&pf->switch_mutex); in i40e_veb_mem_alloc()
14718 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) in i40e_veb_mem_alloc()
14730 veb->pf = pf; in i40e_veb_mem_alloc()
14734 pf->veb[i] = veb; in i40e_veb_mem_alloc()
14737 mutex_unlock(&pf->switch_mutex); in i40e_veb_mem_alloc()
14750 struct i40e_pf *pf = branch->pf; in i40e_switch_branch_release() local
14757 if (!pf->veb[i]) in i40e_switch_branch_release()
14759 if (pf->veb[i]->uplink_seid == branch->seid) in i40e_switch_branch_release()
14760 i40e_switch_branch_release(pf->veb[i]); in i40e_switch_branch_release()
14768 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_switch_branch_release()
14769 if (!pf->vsi[i]) in i40e_switch_branch_release()
14771 if (pf->vsi[i]->uplink_seid == branch_seid && in i40e_switch_branch_release()
14772 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_switch_branch_release()
14773 i40e_vsi_release(pf->vsi[i]); in i40e_switch_branch_release()
14782 if (pf->veb[veb_idx]) in i40e_switch_branch_release()
14783 i40e_veb_release(pf->veb[veb_idx]); in i40e_switch_branch_release()
14795 if (veb->pf) { in i40e_veb_clear()
14796 struct i40e_pf *pf = veb->pf; in i40e_veb_clear() local
14798 mutex_lock(&pf->switch_mutex); in i40e_veb_clear()
14799 if (pf->veb[veb->idx] == veb) in i40e_veb_clear()
14800 pf->veb[veb->idx] = NULL; in i40e_veb_clear()
14801 mutex_unlock(&pf->switch_mutex); in i40e_veb_clear()
14814 struct i40e_pf *pf; in i40e_veb_release() local
14817 pf = veb->pf; in i40e_veb_release()
14820 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_veb_release()
14821 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { in i40e_veb_release()
14823 vsi = pf->vsi[i]; in i40e_veb_release()
14827 dev_info(&pf->pdev->dev, in i40e_veb_release()
14837 if (veb->uplink_seid == pf->mac_seid) in i40e_veb_release()
14843 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_veb_release()
14844 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; in i40e_veb_release()
14847 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); in i40e_veb_release()
14858 struct i40e_pf *pf = veb->pf; in i40e_add_veb() local
14859 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); in i40e_add_veb()
14862 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, in i40e_add_veb()
14868 dev_info(&pf->pdev->dev, in i40e_add_veb()
14871 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
14876 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, in i40e_add_veb()
14879 dev_info(&pf->pdev->dev, in i40e_add_veb()
14882 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
14887 dev_info(&pf->pdev->dev, in i40e_add_veb()
14890 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
14891 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); in i40e_add_veb()
14904 * @pf: board private structure
14918 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, in i40e_veb_setup() argument
14929 dev_info(&pf->pdev->dev, in i40e_veb_setup()
14936 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) in i40e_veb_setup()
14937 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) in i40e_veb_setup()
14939 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) { in i40e_veb_setup()
14940 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", in i40e_veb_setup()
14945 if (uplink_seid && uplink_seid != pf->mac_seid) { in i40e_veb_setup()
14947 if (pf->veb[veb_idx] && in i40e_veb_setup()
14948 pf->veb[veb_idx]->seid == uplink_seid) { in i40e_veb_setup()
14949 uplink_veb = pf->veb[veb_idx]; in i40e_veb_setup()
14954 dev_info(&pf->pdev->dev, in i40e_veb_setup()
14961 veb_idx = i40e_veb_mem_alloc(pf); in i40e_veb_setup()
14964 veb = pf->veb[veb_idx]; in i40e_veb_setup()
14971 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); in i40e_veb_setup()
14974 if (vsi_idx == pf->lan_vsi) in i40e_veb_setup()
14975 pf->lan_veb = veb->idx; in i40e_veb_setup()
14986 * i40e_setup_pf_switch_element - set PF vars based on switch type
14987 * @pf: board private structure
14994 static void i40e_setup_pf_switch_element(struct i40e_pf *pf, in i40e_setup_pf_switch_element() argument
15004 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch_element()
15010 pf->mac_seid = seid; in i40e_setup_pf_switch_element()
15014 if (uplink_seid != pf->mac_seid) in i40e_setup_pf_switch_element()
15016 if (pf->lan_veb >= I40E_MAX_VEB) { in i40e_setup_pf_switch_element()
15021 if (pf->veb[v] && (pf->veb[v]->seid == seid)) { in i40e_setup_pf_switch_element()
15022 pf->lan_veb = v; in i40e_setup_pf_switch_element()
15026 if (pf->lan_veb >= I40E_MAX_VEB) { in i40e_setup_pf_switch_element()
15027 v = i40e_veb_mem_alloc(pf); in i40e_setup_pf_switch_element()
15030 pf->lan_veb = v; in i40e_setup_pf_switch_element()
15033 if (pf->lan_veb >= I40E_MAX_VEB) in i40e_setup_pf_switch_element()
15036 pf->veb[pf->lan_veb]->seid = seid; in i40e_setup_pf_switch_element()
15037 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; in i40e_setup_pf_switch_element()
15038 pf->veb[pf->lan_veb]->pf = pf; in i40e_setup_pf_switch_element()
15039 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; in i40e_setup_pf_switch_element()
15045 * the PF's VSI in i40e_setup_pf_switch_element()
15047 pf->mac_seid = uplink_seid; in i40e_setup_pf_switch_element()
15048 pf->pf_seid = downlink_seid; in i40e_setup_pf_switch_element()
15049 pf->main_vsi_seid = seid; in i40e_setup_pf_switch_element()
15051 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch_element()
15053 pf->pf_seid, pf->main_vsi_seid); in i40e_setup_pf_switch_element()
15064 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", in i40e_setup_pf_switch_element()
15072 * @pf: board private structure
15078 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) in i40e_fetch_switch_configuration() argument
15094 ret = i40e_aq_get_switch_config(&pf->hw, sw_config, in i40e_fetch_switch_configuration()
15098 dev_info(&pf->pdev->dev, in i40e_fetch_switch_configuration()
15101 i40e_aq_str(&pf->hw, in i40e_fetch_switch_configuration()
15102 pf->hw.aq.asq_last_status)); in i40e_fetch_switch_configuration()
15111 dev_info(&pf->pdev->dev, in i40e_fetch_switch_configuration()
15119 i40e_setup_pf_switch_element(pf, ele, num_reported, in i40e_fetch_switch_configuration()
15130 * @pf: board private structure
15136 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired) in i40e_setup_pf_switch() argument
15142 ret = i40e_fetch_switch_configuration(pf, false); in i40e_setup_pf_switch()
15144 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch()
15147 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_setup_pf_switch()
15150 i40e_pf_reset_stats(pf); in i40e_setup_pf_switch()
15158 if ((pf->hw.pf_id == 0) && in i40e_setup_pf_switch()
15159 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { in i40e_setup_pf_switch()
15161 pf->last_sw_conf_flags = flags; in i40e_setup_pf_switch()
15164 if (pf->hw.pf_id == 0) { in i40e_setup_pf_switch()
15168 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0, in i40e_setup_pf_switch()
15170 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { in i40e_setup_pf_switch()
15171 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch()
15174 i40e_aq_str(&pf->hw, in i40e_setup_pf_switch()
15175 pf->hw.aq.asq_last_status)); in i40e_setup_pf_switch()
15178 pf->last_sw_conf_valid_flags = valid_flags; in i40e_setup_pf_switch()
15182 if (pf->lan_vsi == I40E_NO_VSI || reinit) { in i40e_setup_pf_switch()
15186 /* Set up the PF VSI associated with the PF's main VSI in i40e_setup_pf_switch()
15189 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) in i40e_setup_pf_switch()
15190 uplink_seid = pf->veb[pf->lan_veb]->seid; in i40e_setup_pf_switch()
15192 uplink_seid = pf->mac_seid; in i40e_setup_pf_switch()
15193 if (pf->lan_vsi == I40E_NO_VSI) in i40e_setup_pf_switch()
15194 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); in i40e_setup_pf_switch()
15196 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); in i40e_setup_pf_switch()
15198 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); in i40e_setup_pf_switch()
15199 i40e_cloud_filter_exit(pf); in i40e_setup_pf_switch()
15200 i40e_fdir_teardown(pf); in i40e_setup_pf_switch()
15205 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; in i40e_setup_pf_switch()
15207 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_setup_pf_switch()
15208 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; in i40e_setup_pf_switch()
15209 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); in i40e_setup_pf_switch()
15211 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); in i40e_setup_pf_switch()
15213 i40e_fdir_sb_setup(pf); in i40e_setup_pf_switch()
15215 /* Setup static PF queue filter control settings */ in i40e_setup_pf_switch()
15216 ret = i40e_setup_pf_filter_control(pf); in i40e_setup_pf_switch()
15218 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", in i40e_setup_pf_switch()
15226 if ((pf->flags & I40E_FLAG_RSS_ENABLED)) in i40e_setup_pf_switch()
15227 i40e_pf_config_rss(pf); in i40e_setup_pf_switch()
15230 i40e_link_event(pf); in i40e_setup_pf_switch()
15233 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & in i40e_setup_pf_switch()
15236 i40e_ptp_init(pf); in i40e_setup_pf_switch()
15242 udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev); in i40e_setup_pf_switch()
15252 * @pf: board private structure
15254 static void i40e_determine_queue_usage(struct i40e_pf *pf) in i40e_determine_queue_usage() argument
15259 pf->num_lan_qps = 0; in i40e_determine_queue_usage()
15265 queues_left = pf->hw.func_caps.num_tx_qp; in i40e_determine_queue_usage()
15268 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { in i40e_determine_queue_usage()
15269 /* one qp for PF, no queues for anything else */ in i40e_determine_queue_usage()
15271 pf->alloc_rss_size = pf->num_lan_qps = 1; in i40e_determine_queue_usage()
15274 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | in i40e_determine_queue_usage()
15282 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_determine_queue_usage()
15283 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | in i40e_determine_queue_usage()
15287 /* one qp for PF */ in i40e_determine_queue_usage()
15288 pf->alloc_rss_size = pf->num_lan_qps = 1; in i40e_determine_queue_usage()
15289 queues_left -= pf->num_lan_qps; in i40e_determine_queue_usage()
15291 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | in i40e_determine_queue_usage()
15297 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_determine_queue_usage()
15300 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && in i40e_determine_queue_usage()
15302 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | in i40e_determine_queue_usage()
15304 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); in i40e_determine_queue_usage()
15308 q_max = max_t(int, pf->rss_size_max, num_online_cpus()); in i40e_determine_queue_usage()
15309 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp); in i40e_determine_queue_usage()
15310 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors); in i40e_determine_queue_usage()
15311 pf->num_lan_qps = q_max; in i40e_determine_queue_usage()
15313 queues_left -= pf->num_lan_qps; in i40e_determine_queue_usage()
15316 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_determine_queue_usage()
15320 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_determine_queue_usage()
15321 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_determine_queue_usage()
15322 …dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n… in i40e_determine_queue_usage()
15326 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && in i40e_determine_queue_usage()
15327 pf->num_vf_qps && pf->num_req_vfs && queues_left) { in i40e_determine_queue_usage()
15328 pf->num_req_vfs = min_t(int, pf->num_req_vfs, in i40e_determine_queue_usage()
15329 (queues_left / pf->num_vf_qps)); in i40e_determine_queue_usage()
15330 queues_left -= (pf->num_req_vfs * pf->num_vf_qps); in i40e_determine_queue_usage()
15333 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && in i40e_determine_queue_usage()
15334 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { in i40e_determine_queue_usage()
15335 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, in i40e_determine_queue_usage()
15336 (queues_left / pf->num_vmdq_qps)); in i40e_determine_queue_usage()
15337 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); in i40e_determine_queue_usage()
15340 pf->queues_left = queues_left; in i40e_determine_queue_usage()
15341 dev_dbg(&pf->pdev->dev, in i40e_determine_queue_usage()
15343 pf->hw.func_caps.num_tx_qp, in i40e_determine_queue_usage()
15344 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), in i40e_determine_queue_usage()
15345 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, in i40e_determine_queue_usage()
15346 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, in i40e_determine_queue_usage()
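
i40e_determine_queue_usage() above hands out the hardware queue budget in a fixed order: the LAN VSI takes its share first, then the SR-IOV and VMDq requests are trimmed with min_t() to whatever is left. A minimal arithmetic sketch of that budgeting with made-up capability numbers:

    #include <stdio.h>

    int main(void)
    {
            int queues_left = 64;                    /* assumed num_tx_qp */
            int num_lan_qps = 8;                     /* assumed LAN allocation */
            int num_req_vfs = 16, num_vf_qps = 4;    /* assumed SR-IOV request */
            int num_vmdq_vsis = 8, num_vmdq_qps = 2; /* assumed VMDq request */

            queues_left -= num_lan_qps;

            /* Scale the VF count down if the budget cannot cover the request. */
            if (num_req_vfs > queues_left / num_vf_qps)
                    num_req_vfs = queues_left / num_vf_qps;
            queues_left -= num_req_vfs * num_vf_qps;

            /* Same trimming for VMDq VSIs. */
            if (num_vmdq_vsis > queues_left / num_vmdq_qps)
                    num_vmdq_vsis = queues_left / num_vmdq_qps;
            queues_left -= num_vmdq_vsis * num_vmdq_qps;

            printf("VFs=%d VMDq VSIs=%d left=%d\n",
                   num_req_vfs, num_vmdq_vsis, queues_left);
            return 0;
    }
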
15351 * i40e_setup_pf_filter_control - Setup PF static filter control
15352 * @pf: PF to be setup
15354 * i40e_setup_pf_filter_control sets up a PF's initial filter control
15355 * settings. If PE/FCoE are enabled then it will also set the per PF
15357 * ethertype and macvlan type filter settings for the pf.
15361 static int i40e_setup_pf_filter_control(struct i40e_pf *pf) in i40e_setup_pf_filter_control() argument
15363 struct i40e_filter_control_settings *settings = &pf->filter_settings; in i40e_setup_pf_filter_control()
15368 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) in i40e_setup_pf_filter_control()
15371 /* Ethtype and MACVLAN filters enabled for PF */ in i40e_setup_pf_filter_control()
15375 if (i40e_set_filter_control(&pf->hw, settings)) in i40e_setup_pf_filter_control()
15383 static void i40e_print_features(struct i40e_pf *pf) in i40e_print_features() argument
15385 struct i40e_hw *hw = &pf->hw; in i40e_print_features()
15393 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); in i40e_print_features()
15395 i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); in i40e_print_features()
15398 pf->hw.func_caps.num_vsis, in i40e_print_features()
15399 pf->vsi[pf->lan_vsi]->num_queue_pairs); in i40e_print_features()
15400 if (pf->flags & I40E_FLAG_RSS_ENABLED) in i40e_print_features()
15402 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) in i40e_print_features()
15404 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_print_features()
15408 if (pf->flags & I40E_FLAG_DCB_CAPABLE) in i40e_print_features()
15412 if (pf->flags & I40E_FLAG_PTP) in i40e_print_features()
15414 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) in i40e_print_features()
15419 dev_info(&pf->pdev->dev, "%s\n", buf); in i40e_print_features()
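
i40e_print_features() assembles one summary line by appending to a fixed buffer, tracking the bytes used in i and passing REMAIN(i) as the space left to each call. A standalone model of that accumulate-into-a-buffer pattern; the scnprintf() below is a userspace stand-in for the kernel helper, and the feature names are placeholders:

    #include <stdarg.h>
    #include <stdio.h>

    #define INFO_STRING_LEN 80
    #define REMAIN(i) (INFO_STRING_LEN - (i))

    /* Userspace stand-in for the kernel's scnprintf(): returns the number of
     * characters actually stored (excluding the NUL), never more than size-1. */
    static int scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
            va_list args;
            int n;

            if (size == 0)
                    return 0;
            va_start(args, fmt);
            n = vsnprintf(buf, size, fmt, args);
            va_end(args);
            if (n < 0)
                    return 0;
            return (size_t)n >= size ? (int)size - 1 : n;
    }

    int main(void)
    {
            char buf[INFO_STRING_LEN];
            int i;

            i  = scnprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", 0);
            i += scnprintf(&buf[i], REMAIN(i), " RSS");
            i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
            i += scnprintf(&buf[i], REMAIN(i), " VEB");

            printf("%s\n", buf);
            return 0;
    }

Returning the stored length (rather than the would-have-been length) is what keeps the running index i from overshooting the buffer.
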
15427 * @pf: board private structure
15434 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) in i40e_get_platform_mac_addr() argument
15436 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) in i40e_get_platform_mac_addr()
15437 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr); in i40e_get_platform_mac_addr()
15465 * @pf: board private structure
15472 static bool i40e_check_recovery_mode(struct i40e_pf *pf) in i40e_check_recovery_mode() argument
15474 u32 val = rd32(&pf->hw, I40E_GL_FWSTS); in i40e_check_recovery_mode()
15477 dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n"); in i40e_check_recovery_mode()
15478 …dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for detai… in i40e_check_recovery_mode()
15479 set_bit(__I40E_RECOVERY_MODE, pf->state); in i40e_check_recovery_mode()
15483 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_check_recovery_mode()
15484 …dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full … in i40e_check_recovery_mode()
15491 * @pf: board private structure
15500 * state is to issue a series of pf-resets and check a return value.
15501 * If a PF reset returns success then the firmware could be in recovery
15510 static int i40e_pf_loop_reset(struct i40e_pf *pf) in i40e_pf_loop_reset() argument
15512 /* wait max 10 seconds for PF reset to succeed */ in i40e_pf_loop_reset()
15514 struct i40e_hw *hw = &pf->hw; in i40e_pf_loop_reset()
15524 pf->pfr_count++; in i40e_pf_loop_reset()
15526 dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); in i40e_pf_loop_reset()
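
The i40e_pf_loop_reset() comments above describe issuing PF resets repeatedly for up to about ten seconds, counting attempts in pf->pfr_count and logging a failure if the deadline expires. A generic sketch of that bounded-retry pattern in standalone C; try_reset() is a stand-in that fails a couple of times before succeeding:

    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    /* Stand-in for the real reset request; fails the first few attempts. */
    static int try_reset(void)
    {
            static int attempts;

            return ++attempts < 3 ? -1 : 0;
    }

    int main(void)
    {
            const time_t deadline = time(NULL) + 10;  /* ~10 s budget */
            int reset_count = 0;
            int ret;

            do {
                    ret = try_reset();
                    reset_count++;
                    if (ret == 0)
                            break;
                    usleep(100 * 1000);               /* back off 100 ms */
            } while (time(NULL) < deadline);

            if (ret)
                    printf("PF reset failed after %d attempts: %d\n", reset_count, ret);
            else
                    printf("PF reset succeeded after %d attempts\n", reset_count);
            return ret ? 1 : 0;
    }
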
15533 * @pf: board private structure
15542 static bool i40e_check_fw_empr(struct i40e_pf *pf) in i40e_check_fw_empr() argument
15544 const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) & in i40e_check_fw_empr()
15551 * i40e_handle_resets - handle EMP resets and PF resets
15552 * @pf: board private structure
15554 * Handle both EMP resets and PF resets and conclude whether there are
15561 static int i40e_handle_resets(struct i40e_pf *pf) in i40e_handle_resets() argument
15563 const int pfr = i40e_pf_loop_reset(pf); in i40e_handle_resets()
15564 const bool is_empr = i40e_check_fw_empr(pf); in i40e_handle_resets()
15567 …dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several … in i40e_handle_resets()
15574 * @pf: board private structure
15582 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw) in i40e_init_recovery_mode() argument
15588 pci_set_drvdata(pf->pdev, pf); in i40e_init_recovery_mode()
15589 pci_save_state(pf->pdev); in i40e_init_recovery_mode()
15592 timer_setup(&pf->service_timer, i40e_service_timer, 0); in i40e_init_recovery_mode()
15593 pf->service_timer_period = HZ; in i40e_init_recovery_mode()
15595 INIT_WORK(&pf->service_task, i40e_service_task); in i40e_init_recovery_mode()
15596 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_init_recovery_mode()
15598 err = i40e_init_interrupt_scheme(pf); in i40e_init_recovery_mode()
15607 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) in i40e_init_recovery_mode()
15608 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; in i40e_init_recovery_mode()
15610 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; in i40e_init_recovery_mode()
15612 /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */ in i40e_init_recovery_mode()
15613 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_init_recovery_mode()
15615 if (!pf->vsi) { in i40e_init_recovery_mode()
15623 v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN); in i40e_init_recovery_mode()
15628 pf->lan_vsi = v_idx; in i40e_init_recovery_mode()
15629 vsi = pf->vsi[v_idx]; in i40e_init_recovery_mode()
15642 i40e_dbg_pf_init(pf); in i40e_init_recovery_mode()
15644 err = i40e_setup_misc_vector_for_recovery_mode(pf); in i40e_init_recovery_mode()
15649 i40e_send_version(pf); in i40e_init_recovery_mode()
15652 mod_timer(&pf->service_timer, in i40e_init_recovery_mode()
15653 round_jiffies(jiffies + pf->service_timer_period)); in i40e_init_recovery_mode()
15658 i40e_reset_interrupt_capability(pf); in i40e_init_recovery_mode()
15659 timer_shutdown_sync(&pf->service_timer); in i40e_init_recovery_mode()
15662 pci_release_mem_regions(pf->pdev); in i40e_init_recovery_mode()
15663 pci_disable_device(pf->pdev); in i40e_init_recovery_mode()
15664 kfree(pf); in i40e_init_recovery_mode()
15678 struct i40e_pf *pf = i40e_hw_to_pf(hw); in i40e_set_subsystem_device_id() local
15680 hw->subsystem_device_id = pf->pdev->subsystem_device ? in i40e_set_subsystem_device_id()
15681 pf->pdev->subsystem_device : in i40e_set_subsystem_device_id()
15690 * i40e_probe initializes a PF identified by a pci_dev structure.
15691 * The OS initialization, configuring of the PF private structure,
15702 struct i40e_pf *pf; in i40e_probe() local
15741 pf = kzalloc(sizeof(*pf), GFP_KERNEL); in i40e_probe()
15742 if (!pf) { in i40e_probe()
15746 pf->next_vsi = 0; in i40e_probe()
15747 pf->pdev = pdev; in i40e_probe()
15748 set_bit(__I40E_DOWN, pf->state); in i40e_probe()
15750 hw = &pf->hw; in i40e_probe()
15752 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), in i40e_probe()
15759 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) { in i40e_probe()
15761 pf->ioremap_len); in i40e_probe()
15765 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); in i40e_probe()
15770 pf->ioremap_len, err); in i40e_probe()
15781 pf->instance = pfs_found; in i40e_probe()
15790 INIT_LIST_HEAD(&pf->l3_flex_pit_list); in i40e_probe()
15791 INIT_LIST_HEAD(&pf->l4_flex_pit_list); in i40e_probe()
15792 INIT_LIST_HEAD(&pf->ddp_old_prof); in i40e_probe()
15800 pf->msg_enable = netif_msg_init(debug, in i40e_probe()
15805 pf->hw.debug_mask = debug; in i40e_probe()
15813 pf->corer_count++; in i40e_probe()
15818 /* Reset here to make sure all is clean and to define PF 'n' */ in i40e_probe()
15828 err = i40e_handle_resets(pf); in i40e_probe()
15832 i40e_check_recovery_mode(pf); in i40e_probe()
15843 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; in i40e_probe()
15845 snprintf(pf->int_name, sizeof(pf->int_name) - 1, in i40e_probe()
15847 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); in i40e_probe()
15857 pf->hw.fc.requested_mode = I40E_FC_NONE; in i40e_probe()
15899 i40e_verify_eeprom(pf); in i40e_probe()
15907 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities); in i40e_probe()
15911 err = i40e_sw_init(pf); in i40e_probe()
15917 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_probe()
15918 return i40e_init_recovery_mode(pf, hw); in i40e_probe()
15938 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) { in i40e_probe()
15944 i40e_get_platform_mac_addr(pdev, pf); in i40e_probe()
15955 pf->hw_features |= I40E_HW_PORT_ID_VALID; in i40e_probe()
15957 i40e_ptp_alloc_pins(pf); in i40e_probe()
15958 pci_set_drvdata(pdev, pf); in i40e_probe()
15962 status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status); in i40e_probe()
15965 (pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) : in i40e_probe()
15966 (pf->flags |= I40E_FLAG_DISABLE_FW_LLDP); in i40e_probe()
15968 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ? in i40e_probe()
15975 err = i40e_init_pf_dcb(pf); in i40e_probe()
15978 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); in i40e_probe()
15984 timer_setup(&pf->service_timer, i40e_service_timer, 0); in i40e_probe()
15985 pf->service_timer_period = HZ; in i40e_probe()
15987 INIT_WORK(&pf->service_task, i40e_service_task); in i40e_probe()
15988 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_probe()
15993 pf->wol_en = false; in i40e_probe()
15995 pf->wol_en = true; in i40e_probe()
15996 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); in i40e_probe()
15999 i40e_determine_queue_usage(pf); in i40e_probe()
16000 err = i40e_init_interrupt_scheme(pf); in i40e_probe()
16006 * pairs than MSI-X vectors (pf->num_lan_msix) exist. Thus in i40e_probe()
16007 * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1. in i40e_probe()
16010 pf->num_lan_msix = 1; in i40e_probe()
16012 pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port; in i40e_probe()
16013 pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port; in i40e_probe()
16014 pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; in i40e_probe()
16015 pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared; in i40e_probe()
16016 pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS; in i40e_probe()
16017 pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN | in i40e_probe()
16025 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) in i40e_probe()
16026 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; in i40e_probe()
16028 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; in i40e_probe()
16029 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { in i40e_probe()
16030 dev_warn(&pf->pdev->dev, in i40e_probe()
16032 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); in i40e_probe()
16033 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; in i40e_probe()
16036 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */ in i40e_probe()
16037 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_probe()
16039 if (!pf->vsi) { in i40e_probe()
16046 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && in i40e_probe()
16047 (pf->flags & I40E_FLAG_MSIX_ENABLED) && in i40e_probe()
16048 !test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_probe()
16050 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; in i40e_probe()
16053 err = i40e_setup_pf_switch(pf, false, false); in i40e_probe()
16058 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); in i40e_probe()
16061 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_probe()
16062 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { in i40e_probe()
16063 i40e_vsi_open(pf->vsi[i]); in i40e_probe()
16071 err = i40e_aq_set_phy_int_mask(&pf->hw, in i40e_probe()
16076 dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n", in i40e_probe()
16078 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
16091 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { in i40e_probe()
16093 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); in i40e_probe()
16095 dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n", in i40e_probe()
16097 i40e_aq_str(&pf->hw, in i40e_probe()
16098 pf->hw.aq.asq_last_status)); in i40e_probe()
16104 clear_bit(__I40E_DOWN, pf->state); in i40e_probe()
16111 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_probe()
16112 err = i40e_setup_misc_vector(pf); in i40e_probe()
16116 i40e_cloud_filter_exit(pf); in i40e_probe()
16117 i40e_fdir_teardown(pf); in i40e_probe()
16124 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && in i40e_probe()
16125 (pf->flags & I40E_FLAG_MSIX_ENABLED) && in i40e_probe()
16126 !test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_probe()
16136 err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); in i40e_probe()
16145 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_probe()
16146 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile, in i40e_probe()
16147 pf->num_iwarp_msix, in i40e_probe()
16149 if (pf->iwarp_base_vector < 0) { in i40e_probe()
16152 pf->num_iwarp_msix, pf->iwarp_base_vector); in i40e_probe()
16153 pf->flags &= ~I40E_FLAG_IWARP_ENABLED; in i40e_probe()
16157 i40e_dbg_pf_init(pf); in i40e_probe()
16160 i40e_send_version(pf); in i40e_probe()
16163 mod_timer(&pf->service_timer, in i40e_probe()
16164 round_jiffies(jiffies + pf->service_timer_period)); in i40e_probe()
16166 /* add this PF to client device list and launch a client service task */ in i40e_probe()
16167 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_probe()
16168 err = i40e_lan_add_device(pf); in i40e_probe()
16170 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", in i40e_probe()
16180 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) { in i40e_probe()
16187 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, in i40e_probe()
16228 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n", in i40e_probe()
16230 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
16231 pf->hw.phy.link_info.requested_speeds = abilities.link_speed; in i40e_probe()
16234 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags); in i40e_probe()
16239 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n", in i40e_probe()
16241 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
16245 val = (rd32(&pf->hw, I40E_PRTGL_SAH) & in i40e_probe()
16249 pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT); in i40e_probe()
16254 * PF/VF VSIs. in i40e_probe()
16257 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, in i40e_probe()
16258 pf->main_vsi_seid); in i40e_probe()
16260 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) || in i40e_probe()
16261 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) in i40e_probe()
16262 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS; in i40e_probe()
16263 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722) in i40e_probe()
16264 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER; in i40e_probe()
16266 i40e_print_features(pf); in i40e_probe()
16272 set_bit(__I40E_DOWN, pf->state); in i40e_probe()
16273 i40e_clear_interrupt_scheme(pf); in i40e_probe()
16274 kfree(pf->vsi); in i40e_probe()
16276 i40e_reset_interrupt_capability(pf); in i40e_probe()
16277 timer_shutdown_sync(&pf->service_timer); in i40e_probe()
16282 kfree(pf->qp_pile); in i40e_probe()
16288 kfree(pf); in i40e_probe()
16308 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_remove() local
16309 struct i40e_hw *hw = &pf->hw; in i40e_remove()
16313 i40e_dbg_pf_exit(pf); in i40e_remove()
16315 i40e_ptp_stop(pf); in i40e_remove()
16325 while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_remove()
16327 set_bit(__I40E_IN_REMOVE, pf->state); in i40e_remove()
16329 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { in i40e_remove()
16330 set_bit(__I40E_VF_RESETS_DISABLED, pf->state); in i40e_remove()
16331 i40e_free_vfs(pf); in i40e_remove()
16332 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; in i40e_remove()
16335 set_bit(__I40E_SUSPENDED, pf->state); in i40e_remove()
16336 set_bit(__I40E_DOWN, pf->state); in i40e_remove()
16337 if (pf->service_timer.function) in i40e_remove()
16338 timer_shutdown_sync(&pf->service_timer); in i40e_remove()
16339 if (pf->service_task.func) in i40e_remove()
16340 cancel_work_sync(&pf->service_task); in i40e_remove()
16342 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_remove()
16343 struct i40e_vsi *vsi = pf->vsi[0]; in i40e_remove()
16345 /* We know that we have allocated only one vsi for this PF, in i40e_remove()
16358 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_remove()
16360 i40e_fdir_teardown(pf); in i40e_remove()
16363 * This will leave only the PF's VSI remaining. in i40e_remove()
16366 if (!pf->veb[i]) in i40e_remove()
16369 if (pf->veb[i]->uplink_seid == pf->mac_seid || in i40e_remove()
16370 pf->veb[i]->uplink_seid == 0) in i40e_remove()
16371 i40e_switch_branch_release(pf->veb[i]); in i40e_remove()
16374 /* Now we can shutdown the PF's VSIs, just before we kill in i40e_remove()
16377 for (i = pf->num_alloc_vsi; i--;) in i40e_remove()
16378 if (pf->vsi[i]) { in i40e_remove()
16379 i40e_vsi_close(pf->vsi[i]); in i40e_remove()
16380 i40e_vsi_release(pf->vsi[i]); in i40e_remove()
16381 pf->vsi[i] = NULL; in i40e_remove()
16384 i40e_cloud_filter_exit(pf); in i40e_remove()
16387 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_remove()
16388 ret_code = i40e_lan_del_device(pf); in i40e_remove()
16405 if (test_bit(__I40E_RECOVERY_MODE, pf->state) && in i40e_remove()
16406 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_remove()
16407 free_irq(pf->pdev->irq, pf); in i40e_remove()
16418 i40e_clear_interrupt_scheme(pf); in i40e_remove()
16419 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_remove()
16420 if (pf->vsi[i]) { in i40e_remove()
16421 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_remove()
16422 i40e_vsi_clear_rings(pf->vsi[i]); in i40e_remove()
16423 i40e_vsi_clear(pf->vsi[i]); in i40e_remove()
16424 pf->vsi[i] = NULL; in i40e_remove()
16430 kfree(pf->veb[i]); in i40e_remove()
16431 pf->veb[i] = NULL; in i40e_remove()
16434 kfree(pf->qp_pile); in i40e_remove()
16435 kfree(pf->vsi); in i40e_remove()
16438 kfree(pf); in i40e_remove()
16456 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_pci_error_detected() local
16460 if (!pf) { in i40e_pci_error_detected()
16467 if (!test_bit(__I40E_SUSPENDED, pf->state)) in i40e_pci_error_detected()
16468 i40e_prep_for_reset(pf); in i40e_pci_error_detected()
16485 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_pci_error_slot_reset() local
16500 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_pci_error_slot_reset()
16516 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_pci_error_reset_prepare() local
16518 i40e_prep_for_reset(pf); in i40e_pci_error_reset_prepare()
16527 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_pci_error_reset_done() local
16529 if (test_bit(__I40E_IN_REMOVE, pf->state)) in i40e_pci_error_reset_done()
16532 i40e_reset_and_rebuild(pf, false, false); in i40e_pci_error_reset_done()
16547 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_pci_error_resume() local
16550 if (test_bit(__I40E_SUSPENDED, pf->state)) in i40e_pci_error_resume()
16553 i40e_handle_reset_warning(pf, false); in i40e_pci_error_resume()
16559 * @pf: pointer to i40e_pf struct
16561 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf) in i40e_enable_mc_magic_wake() argument
16563 struct i40e_hw *hw = &pf->hw; in i40e_enable_mc_magic_wake()
16569 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { in i40e_enable_mc_magic_wake()
16571 pf->vsi[pf->lan_vsi]->netdev->dev_addr); in i40e_enable_mc_magic_wake()
16573 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
16589 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
16599 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
16609 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_shutdown() local
16610 struct i40e_hw *hw = &pf->hw; in i40e_shutdown()
16612 set_bit(__I40E_SUSPENDED, pf->state); in i40e_shutdown()
16613 set_bit(__I40E_DOWN, pf->state); in i40e_shutdown()
16615 del_timer_sync(&pf->service_timer); in i40e_shutdown()
16616 cancel_work_sync(&pf->service_task); in i40e_shutdown()
16617 i40e_cloud_filter_exit(pf); in i40e_shutdown()
16618 i40e_fdir_teardown(pf); in i40e_shutdown()
16623 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_shutdown()
16625 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) in i40e_shutdown()
16626 i40e_enable_mc_magic_wake(pf); in i40e_shutdown()
16628 i40e_prep_for_reset(pf); in i40e_shutdown()
16631 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); in i40e_shutdown()
16633 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); in i40e_shutdown()
16636 if (test_bit(__I40E_RECOVERY_MODE, pf->state) && in i40e_shutdown()
16637 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_shutdown()
16638 free_irq(pf->pdev->irq, pf); in i40e_shutdown()
16645 i40e_clear_interrupt_scheme(pf); in i40e_shutdown()
16649 pci_wake_from_d3(pdev, pf->wol_en); in i40e_shutdown()
16660 struct i40e_pf *pf = dev_get_drvdata(dev); in i40e_suspend() local
16661 struct i40e_hw *hw = &pf->hw; in i40e_suspend()
16664 if (test_and_set_bit(__I40E_SUSPENDED, pf->state)) in i40e_suspend()
16667 set_bit(__I40E_DOWN, pf->state); in i40e_suspend()
16670 del_timer_sync(&pf->service_timer); in i40e_suspend()
16671 cancel_work_sync(&pf->service_task); in i40e_suspend()
16676 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_suspend()
16678 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) in i40e_suspend()
16679 i40e_enable_mc_magic_wake(pf); in i40e_suspend()
16687 i40e_prep_for_reset(pf); in i40e_suspend()
16689 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); in i40e_suspend()
16690 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); in i40e_suspend()
16697 i40e_clear_interrupt_scheme(pf); in i40e_suspend()
16710 struct i40e_pf *pf = dev_get_drvdata(dev); in i40e_resume() local
16714 if (!test_bit(__I40E_SUSPENDED, pf->state)) in i40e_resume()
16725 err = i40e_restore_interrupt_scheme(pf); in i40e_resume()
16731 clear_bit(__I40E_DOWN, pf->state); in i40e_resume()
16732 i40e_reset_and_rebuild(pf, false, true); in i40e_resume()
16737 clear_bit(__I40E_SUSPENDED, pf->state); in i40e_resume()
16740 mod_timer(&pf->service_timer, in i40e_resume()
16741 round_jiffies(jiffies + pf->service_timer_period)); in i40e_resume()