Lines Matching +full:num +full:- +full:rings

1 // SPDX-License-Identifier: GPL-2.0
14 * ice_vsi_type_str - maps VSI type enum to string equivalents
38 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
40 * @ena: start or stop the Rx rings
42 * First enable/disable all of the Rx rings, flush any remaining writes, and
44 * let all of the register writes complete when enabling/disabling the Rx rings
55 ice_flush(&vsi->back->hw); in ice_vsi_ctrl_all_rx_rings()
67 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
75 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_arrays()
79 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_alloc_arrays()
83 vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq, in ice_vsi_alloc_arrays()
84 sizeof(*vsi->tx_rings), GFP_KERNEL); in ice_vsi_alloc_arrays()
85 if (!vsi->tx_rings) in ice_vsi_alloc_arrays()
86 return -ENOMEM; in ice_vsi_alloc_arrays()
88 vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq, in ice_vsi_alloc_arrays()
89 sizeof(*vsi->rx_rings), GFP_KERNEL); in ice_vsi_alloc_arrays()
90 if (!vsi->rx_rings) in ice_vsi_alloc_arrays()
93 /* txq_map needs to have enough space to track both Tx (stack) rings in ice_vsi_alloc_arrays()
94 * and XDP rings; at this point vsi->num_xdp_txq might not be set, in ice_vsi_alloc_arrays()
99 vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()), in ice_vsi_alloc_arrays()
100 sizeof(*vsi->txq_map), GFP_KERNEL); in ice_vsi_alloc_arrays()
102 if (!vsi->txq_map) in ice_vsi_alloc_arrays()
105 vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq, in ice_vsi_alloc_arrays()
106 sizeof(*vsi->rxq_map), GFP_KERNEL); in ice_vsi_alloc_arrays()
107 if (!vsi->rxq_map) in ice_vsi_alloc_arrays()
111 if (vsi->type == ICE_VSI_LB) in ice_vsi_alloc_arrays()
115 vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors, in ice_vsi_alloc_arrays()
116 sizeof(*vsi->q_vectors), GFP_KERNEL); in ice_vsi_alloc_arrays()
117 if (!vsi->q_vectors) in ice_vsi_alloc_arrays()
123 devm_kfree(dev, vsi->rxq_map); in ice_vsi_alloc_arrays()
125 devm_kfree(dev, vsi->txq_map); in ice_vsi_alloc_arrays()
127 devm_kfree(dev, vsi->rx_rings); in ice_vsi_alloc_arrays()
129 devm_kfree(dev, vsi->tx_rings); in ice_vsi_alloc_arrays()
130 return -ENOMEM; in ice_vsi_alloc_arrays()
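
The err_* unwind visible above frees the allocations in reverse order before returning -ENOMEM, the standard kernel goto-cleanup idiom. A minimal standalone sketch of the same shape, with plain calloc/free standing in for devm_kcalloc/devm_kfree (the struct and names are illustrative, not the driver's):

    #include <stdlib.h>

    struct vsi_arrays {
        void **tx_rings, **rx_rings;
        int *txq_map, *rxq_map;
    };

    /* Allocate in order; on any failure, free whatever already succeeded,
     * in reverse, then report failure (-ENOMEM in the kernel version). */
    static int vsi_alloc_arrays(struct vsi_arrays *a, size_t ntx, size_t nrx)
    {
        a->tx_rings = calloc(ntx, sizeof(*a->tx_rings));
        if (!a->tx_rings)
            return -1;
        a->rx_rings = calloc(nrx, sizeof(*a->rx_rings));
        if (!a->rx_rings)
            goto err_rx_rings;
        a->txq_map = calloc(ntx, sizeof(*a->txq_map));
        if (!a->txq_map)
            goto err_txq_map;
        a->rxq_map = calloc(nrx, sizeof(*a->rxq_map));
        if (!a->rxq_map)
            goto err_rxq_map;
        return 0;

    err_rxq_map:
        free(a->txq_map);
    err_txq_map:
        free(a->rx_rings);
    err_rx_rings:
        free(a->tx_rings);
        return -1;
    }
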
134 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
139 switch (vsi->type) { in ice_vsi_set_num_desc()
145 * ethtool -G so we should keep those values instead of in ice_vsi_set_num_desc()
148 if (!vsi->num_rx_desc) in ice_vsi_set_num_desc()
149 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; in ice_vsi_set_num_desc()
150 if (!vsi->num_tx_desc) in ice_vsi_set_num_desc()
151 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; in ice_vsi_set_num_desc()
154 dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n", in ice_vsi_set_num_desc()
155 vsi->type); in ice_vsi_set_num_desc()
161 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
168 enum ice_vsi_type vsi_type = vsi->type; in ice_vsi_set_num_qs()
169 struct ice_pf *pf = vsi->back; in ice_vsi_set_num_qs()
170 struct ice_vf *vf = vsi->vf; in ice_vsi_set_num_qs()
177 if (vsi->req_txq) { in ice_vsi_set_num_qs()
178 vsi->alloc_txq = vsi->req_txq; in ice_vsi_set_num_qs()
179 vsi->num_txq = vsi->req_txq; in ice_vsi_set_num_qs()
181 vsi->alloc_txq = min3(pf->num_lan_msix, in ice_vsi_set_num_qs()
186 pf->num_lan_tx = vsi->alloc_txq; in ice_vsi_set_num_qs()
189 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_set_num_qs()
190 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
192 if (vsi->req_rxq) { in ice_vsi_set_num_qs()
193 vsi->alloc_rxq = vsi->req_rxq; in ice_vsi_set_num_qs()
194 vsi->num_rxq = vsi->req_rxq; in ice_vsi_set_num_qs()
196 vsi->alloc_rxq = min3(pf->num_lan_msix, in ice_vsi_set_num_qs()
202 pf->num_lan_rx = vsi->alloc_rxq; in ice_vsi_set_num_qs()
204 vsi->num_q_vectors = min_t(int, pf->num_lan_msix, in ice_vsi_set_num_qs()
205 max_t(int, vsi->alloc_rxq, in ice_vsi_set_num_qs()
206 vsi->alloc_txq)); in ice_vsi_set_num_qs()
212 vsi->alloc_txq = ice_get_num_vfs(pf); in ice_vsi_set_num_qs()
213 vsi->alloc_rxq = vsi->alloc_txq; in ice_vsi_set_num_qs()
214 vsi->num_q_vectors = 1; in ice_vsi_set_num_qs()
217 if (vf->num_req_qs) in ice_vsi_set_num_qs()
218 vf->num_vf_qs = vf->num_req_qs; in ice_vsi_set_num_qs()
219 vsi->alloc_txq = vf->num_vf_qs; in ice_vsi_set_num_qs()
220 vsi->alloc_rxq = vf->num_vf_qs; in ice_vsi_set_num_qs()
221 /* pf->vfs.num_msix_per includes (VF miscellaneous vector + in ice_vsi_set_num_qs()
222 * data queue interrupts). Since vsi->num_q_vectors is number in ice_vsi_set_num_qs()
226 vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF; in ice_vsi_set_num_qs()
229 vsi->alloc_txq = 1; in ice_vsi_set_num_qs()
230 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
231 vsi->num_q_vectors = 1; in ice_vsi_set_num_qs()
234 vsi->alloc_txq = 0; in ice_vsi_set_num_qs()
235 vsi->alloc_rxq = 0; in ice_vsi_set_num_qs()
238 vsi->alloc_txq = 1; in ice_vsi_set_num_qs()
239 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
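
Worked example for the VF branch above (numbers illustrative): with pf->vfs.num_msix_per = 17 and ICE_NONQ_VECS_VF = 1 (the non-queue mailbox/misc vector), vsi->num_q_vectors comes out to 17 - 1 = 16 data-queue vectors.
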
250 * ice_get_free_slot - get the next non-NULL location index in array
263 if (curr < (size - 1) && !tmp_array[curr + 1]) { in ice_get_free_slot()
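
Only the fast-path test of ice_get_free_slot() matches the query. A hedged reconstruction of the whole helper: try the slot just past curr first (the common case right after an allocation), otherwise scan for the first NULL entry; the full-array sentinel is assumed to be ICE_NO_VSI:

    #define ICE_NO_VSI 0xffff    /* assumed "no free slot" sentinel */

    static int get_free_slot(void **tmp_array, int size, int curr)
    {
        /* Fast path: the entry right after curr is usually free. */
        if (curr < (size - 1) && !tmp_array[curr + 1])
            return curr + 1;

        /* Slow path: first NULL entry anywhere in the array. */
        for (int i = 0; i < size; i++)
            if (!tmp_array[i])
                return i;

        return ICE_NO_VSI;
    }
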
279 * ice_vsi_delete_from_hw - delete a VSI from the switch
284 struct ice_pf *pf = vsi->back; in ice_vsi_delete_from_hw()
293 if (vsi->type == ICE_VSI_VF) in ice_vsi_delete_from_hw()
294 ctxt->vf_num = vsi->vf->vf_id; in ice_vsi_delete_from_hw()
295 ctxt->vsi_num = vsi->vsi_num; in ice_vsi_delete_from_hw()
297 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info)); in ice_vsi_delete_from_hw()
299 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); in ice_vsi_delete_from_hw()
301 dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n", in ice_vsi_delete_from_hw()
302 vsi->vsi_num, status); in ice_vsi_delete_from_hw()
308 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
313 struct ice_pf *pf = vsi->back; in ice_vsi_free_arrays()
319 devm_kfree(dev, vsi->q_vectors); in ice_vsi_free_arrays()
320 vsi->q_vectors = NULL; in ice_vsi_free_arrays()
321 devm_kfree(dev, vsi->tx_rings); in ice_vsi_free_arrays()
322 vsi->tx_rings = NULL; in ice_vsi_free_arrays()
323 devm_kfree(dev, vsi->rx_rings); in ice_vsi_free_arrays()
324 vsi->rx_rings = NULL; in ice_vsi_free_arrays()
325 devm_kfree(dev, vsi->txq_map); in ice_vsi_free_arrays()
326 vsi->txq_map = NULL; in ice_vsi_free_arrays()
327 devm_kfree(dev, vsi->rxq_map); in ice_vsi_free_arrays()
328 vsi->rxq_map = NULL; in ice_vsi_free_arrays()
332 * ice_vsi_free_stats - Free the ring statistics structures
338 struct ice_pf *pf = vsi->back; in ice_vsi_free_stats()
341 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_free_stats()
343 if (!pf->vsi_stats) in ice_vsi_free_stats()
346 vsi_stat = pf->vsi_stats[vsi->idx]; in ice_vsi_free_stats()
351 if (vsi_stat->tx_ring_stats[i]) { in ice_vsi_free_stats()
352 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); in ice_vsi_free_stats()
353 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); in ice_vsi_free_stats()
358 if (vsi_stat->rx_ring_stats[i]) { in ice_vsi_free_stats()
359 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); in ice_vsi_free_stats()
360 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); in ice_vsi_free_stats()
364 kfree(vsi_stat->tx_ring_stats); in ice_vsi_free_stats()
365 kfree(vsi_stat->rx_ring_stats); in ice_vsi_free_stats()
367 pf->vsi_stats[vsi->idx] = NULL; in ice_vsi_free_stats()
371 * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
379 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_ring_stats()
382 vsi_stats = pf->vsi_stats[vsi->idx]; in ice_vsi_alloc_ring_stats()
383 tx_ring_stats = vsi_stats->tx_ring_stats; in ice_vsi_alloc_ring_stats()
384 rx_ring_stats = vsi_stats->rx_ring_stats; in ice_vsi_alloc_ring_stats()
391 ring = vsi->tx_rings[i]; in ice_vsi_alloc_ring_stats()
402 ring->ring_stats = ring_stats; in ice_vsi_alloc_ring_stats()
410 ring = vsi->rx_rings[i]; in ice_vsi_alloc_ring_stats()
421 ring->ring_stats = ring_stats; in ice_vsi_alloc_ring_stats()
428 return -ENOMEM; in ice_vsi_alloc_ring_stats()
432 * ice_vsi_free - clean up and deallocate the provided VSI
443 if (!vsi || !vsi->back) in ice_vsi_free()
446 pf = vsi->back; in ice_vsi_free()
449 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { in ice_vsi_free()
450 dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx); in ice_vsi_free()
454 mutex_lock(&pf->sw_mutex); in ice_vsi_free()
457 pf->vsi[vsi->idx] = NULL; in ice_vsi_free()
458 pf->next_vsi = vsi->idx; in ice_vsi_free()
462 mutex_destroy(&vsi->xdp_state_lock); in ice_vsi_free()
463 mutex_unlock(&pf->sw_mutex); in ice_vsi_free()
474 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
482 if (!q_vector->tx.tx_ring) in ice_msix_clean_ctrl_vsi()
486 ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET); in ice_msix_clean_ctrl_vsi()
487 ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring); in ice_msix_clean_ctrl_vsi()
493 * ice_msix_clean_rings - MSIX mode Interrupt Handler
501 if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) in ice_msix_clean_rings()
504 q_vector->total_events++; in ice_msix_clean_rings()
506 napi_schedule(&q_vector->napi); in ice_msix_clean_rings()
514 struct ice_pf *pf = q_vector->vsi->back; in ice_eswitch_msix_clean_rings()
518 if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) in ice_eswitch_msix_clean_rings()
523 napi_schedule(&vf->repr->q_vector->napi); in ice_eswitch_msix_clean_rings()
530 * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
536 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_stat_arrays()
538 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_alloc_stat_arrays()
540 if (!pf->vsi_stats) in ice_vsi_alloc_stat_arrays()
541 return -ENOENT; in ice_vsi_alloc_stat_arrays()
543 if (pf->vsi_stats[vsi->idx]) in ice_vsi_alloc_stat_arrays()
549 return -ENOMEM; in ice_vsi_alloc_stat_arrays()
551 vsi_stat->tx_ring_stats = in ice_vsi_alloc_stat_arrays()
552 kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats), in ice_vsi_alloc_stat_arrays()
554 if (!vsi_stat->tx_ring_stats) in ice_vsi_alloc_stat_arrays()
557 vsi_stat->rx_ring_stats = in ice_vsi_alloc_stat_arrays()
558 kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats), in ice_vsi_alloc_stat_arrays()
560 if (!vsi_stat->rx_ring_stats) in ice_vsi_alloc_stat_arrays()
563 pf->vsi_stats[vsi->idx] = vsi_stat; in ice_vsi_alloc_stat_arrays()
568 kfree(vsi_stat->rx_ring_stats); in ice_vsi_alloc_stat_arrays()
570 kfree(vsi_stat->tx_ring_stats); in ice_vsi_alloc_stat_arrays()
572 pf->vsi_stats[vsi->idx] = NULL; in ice_vsi_alloc_stat_arrays()
573 return -ENOMEM; in ice_vsi_alloc_stat_arrays()
577 * ice_vsi_alloc_def - set default values for already allocated VSI
584 if (vsi->type != ICE_VSI_CHNL) { in ice_vsi_alloc_def()
587 return -ENOMEM; in ice_vsi_alloc_def()
590 switch (vsi->type) { in ice_vsi_alloc_def()
593 vsi->irq_handler = ice_eswitch_msix_clean_rings; in ice_vsi_alloc_def()
597 vsi->irq_handler = ice_msix_clean_rings; in ice_vsi_alloc_def()
601 vsi->irq_handler = ice_msix_clean_ctrl_vsi; in ice_vsi_alloc_def()
605 return -EINVAL; in ice_vsi_alloc_def()
607 vsi->num_rxq = ch->num_rxq; in ice_vsi_alloc_def()
608 vsi->num_txq = ch->num_txq; in ice_vsi_alloc_def()
609 vsi->next_base_q = ch->base_q; in ice_vsi_alloc_def()
616 return -EINVAL; in ice_vsi_alloc_def()
623 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
638 mutex_lock(&pf->sw_mutex); in ice_vsi_alloc()
641 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index in ice_vsi_alloc()
644 if (pf->next_vsi == ICE_NO_VSI) { in ice_vsi_alloc()
653 vsi->back = pf; in ice_vsi_alloc()
654 set_bit(ICE_VSI_DOWN, vsi->state); in ice_vsi_alloc()
657 vsi->idx = pf->next_vsi; in ice_vsi_alloc()
658 pf->vsi[pf->next_vsi] = vsi; in ice_vsi_alloc()
660 /* prepare pf->next_vsi for next use */ in ice_vsi_alloc()
661 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, in ice_vsi_alloc()
662 pf->next_vsi); in ice_vsi_alloc()
664 mutex_init(&vsi->xdp_state_lock); in ice_vsi_alloc()
667 mutex_unlock(&pf->sw_mutex); in ice_vsi_alloc()
672 * ice_alloc_fd_res - Allocate FD resource for a VSI
677 * Returns 0 on success, -EPERM on no-op or -EIO on failure
681 struct ice_pf *pf = vsi->back; in ice_alloc_fd_res()
688 if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) in ice_alloc_fd_res()
689 return -EPERM; in ice_alloc_fd_res()
691 if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF || in ice_alloc_fd_res()
692 vsi->type == ICE_VSI_CHNL)) in ice_alloc_fd_res()
693 return -EPERM; in ice_alloc_fd_res()
696 g_val = pf->hw.func_caps.fd_fltr_guar; in ice_alloc_fd_res()
698 return -EPERM; in ice_alloc_fd_res()
701 b_val = pf->hw.func_caps.fd_fltr_best_effort; in ice_alloc_fd_res()
703 return -EPERM; in ice_alloc_fd_res()
713 if (vsi->type == ICE_VSI_PF) { in ice_alloc_fd_res()
714 vsi->num_gfltr = g_val; in ice_alloc_fd_res()
718 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_alloc_fd_res()
720 return -EPERM; in ice_alloc_fd_res()
722 vsi->num_gfltr = ICE_PF_VSI_GFLTR; in ice_alloc_fd_res()
726 vsi->num_bfltr = b_val; in ice_alloc_fd_res()
727 } else if (vsi->type == ICE_VSI_VF) { in ice_alloc_fd_res()
728 vsi->num_gfltr = 0; in ice_alloc_fd_res()
731 vsi->num_bfltr = b_val; in ice_alloc_fd_res()
738 return -EPERM; in ice_alloc_fd_res()
740 if (!main_vsi->all_numtc) in ice_alloc_fd_res()
741 return -EINVAL; in ice_alloc_fd_res()
744 numtc = main_vsi->all_numtc - ICE_CHNL_START_TC; in ice_alloc_fd_res()
750 return -EPERM; in ice_alloc_fd_res()
752 g_val -= ICE_PF_VSI_GFLTR; in ice_alloc_fd_res()
754 vsi->num_gfltr = g_val / numtc; in ice_alloc_fd_res()
757 vsi->num_bfltr = b_val; in ice_alloc_fd_res()
764 * ice_vsi_get_qs - Assign queues from PF to VSI
771 struct ice_pf *pf = vsi->back; in ice_vsi_get_qs()
773 .qs_mutex = &pf->avail_q_mutex, in ice_vsi_get_qs()
774 .pf_map = pf->avail_txqs, in ice_vsi_get_qs()
775 .pf_map_size = pf->max_pf_txqs, in ice_vsi_get_qs()
776 .q_count = vsi->alloc_txq, in ice_vsi_get_qs()
778 .vsi_map = vsi->txq_map, in ice_vsi_get_qs()
783 .qs_mutex = &pf->avail_q_mutex, in ice_vsi_get_qs()
784 .pf_map = pf->avail_rxqs, in ice_vsi_get_qs()
785 .pf_map_size = pf->max_pf_rxqs, in ice_vsi_get_qs()
786 .q_count = vsi->alloc_rxq, in ice_vsi_get_qs()
788 .vsi_map = vsi->rxq_map, in ice_vsi_get_qs()
794 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_get_qs()
800 vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode; in ice_vsi_get_qs()
805 vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode; in ice_vsi_get_qs()
811 * ice_vsi_put_qs - Release queues from VSI to PF
816 struct ice_pf *pf = vsi->back; in ice_vsi_put_qs()
819 mutex_lock(&pf->avail_q_mutex); in ice_vsi_put_qs()
822 clear_bit(vsi->txq_map[i], pf->avail_txqs); in ice_vsi_put_qs()
823 vsi->txq_map[i] = ICE_INVAL_Q_INDEX; in ice_vsi_put_qs()
827 clear_bit(vsi->rxq_map[i], pf->avail_rxqs); in ice_vsi_put_qs()
828 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; in ice_vsi_put_qs()
831 mutex_unlock(&pf->avail_q_mutex); in ice_vsi_put_qs()
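
pf->avail_txqs/avail_rxqs are PF-wide bitmaps in which a set bit marks a queue owned by some VSI, guarded by avail_q_mutex; ice_vsi_put_qs() hands queues back by clearing their bits and poisoning the VSI's map slots with ICE_INVAL_Q_INDEX. A single-word userspace analog of that pool (the real code uses the kernel bitmap API and must hold the mutex):

    #define MAX_PF_QUEUES 64
    static unsigned long long q_in_use;    /* bit q set => queue q is taken */

    /* Claim the lowest-numbered free queue, -1 when the pool is exhausted. */
    static int q_get(void)
    {
        for (int q = 0; q < MAX_PF_QUEUES; q++)
            if (!(q_in_use & (1ULL << q))) {
                q_in_use |= 1ULL << q;
                return q;
            }
        return -1;
    }

    /* Return a queue to the pool, the moral equivalent of clear_bit() above. */
    static void q_put(int q)
    {
        q_in_use &= ~(1ULL << q);
    }
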
842 return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_is_safe_mode()
853 return test_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_is_rdma_ena()
857 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
865 struct ice_pf *pf = vsi->back; in ice_vsi_clean_rss_flow_fld()
871 status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx); in ice_vsi_clean_rss_flow_fld()
874 vsi->vsi_num, status); in ice_vsi_clean_rss_flow_fld()
878 * ice_rss_clean - Delete RSS related VSI structures and configuration
883 struct ice_pf *pf = vsi->back; in ice_rss_clean()
888 devm_kfree(dev, vsi->rss_hkey_user); in ice_rss_clean()
889 devm_kfree(dev, vsi->rss_lut_user); in ice_rss_clean()
894 ice_rem_vsi_rss_list(&pf->hw, vsi->idx); in ice_rss_clean()
898 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
904 struct ice_pf *pf = vsi->back; in ice_vsi_set_rss_params()
907 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_set_rss_params()
908 vsi->rss_size = 1; in ice_vsi_set_rss_params()
912 cap = &pf->hw.func_caps.common_cap; in ice_vsi_set_rss_params()
913 max_rss_size = BIT(cap->rss_table_entry_width); in ice_vsi_set_rss_params()
914 switch (vsi->type) { in ice_vsi_set_rss_params()
918 vsi->rss_table_size = (u16)cap->rss_table_size; in ice_vsi_set_rss_params()
919 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_set_rss_params()
920 vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size); in ice_vsi_set_rss_params()
922 vsi->rss_size = min_t(u16, num_online_cpus(), in ice_vsi_set_rss_params()
924 vsi->rss_lut_type = ICE_LUT_PF; in ice_vsi_set_rss_params()
927 vsi->rss_table_size = ICE_LUT_VSI_SIZE; in ice_vsi_set_rss_params()
928 vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size); in ice_vsi_set_rss_params()
929 vsi->rss_lut_type = ICE_LUT_VSI; in ice_vsi_set_rss_params()
935 vsi->rss_table_size = ICE_LUT_VSI_SIZE; in ice_vsi_set_rss_params()
936 vsi->rss_size = ICE_MAX_RSS_QS_PER_VF; in ice_vsi_set_rss_params()
937 vsi->rss_lut_type = ICE_LUT_VSI; in ice_vsi_set_rss_params()
943 ice_vsi_type_str(vsi->type)); in ice_vsi_set_rss_params()
949 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
959 memset(&ctxt->info, 0, sizeof(ctxt->info)); in ice_set_dflt_vsi_ctx()
961 ctxt->alloc_from_pool = true; in ice_set_dflt_vsi_ctx()
963 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; in ice_set_dflt_vsi_ctx()
965 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; in ice_set_dflt_vsi_ctx()
967 ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL & in ice_set_dflt_vsi_ctx()
970 /* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which in ice_set_dflt_vsi_ctx()
973 * DVM - leave inner VLAN in packet by default in ice_set_dflt_vsi_ctx()
976 ctxt->info.inner_vlan_flags |= in ice_set_dflt_vsi_ctx()
979 ctxt->info.outer_vlan_flags = in ice_set_dflt_vsi_ctx()
983 ctxt->info.outer_vlan_flags |= in ice_set_dflt_vsi_ctx()
987 ctxt->info.outer_vlan_flags |= in ice_set_dflt_vsi_ctx()
1000 ctxt->info.ingress_table = cpu_to_le32(table); in ice_set_dflt_vsi_ctx()
1001 ctxt->info.egress_table = cpu_to_le32(table); in ice_set_dflt_vsi_ctx()
1003 ctxt->info.outer_up_table = cpu_to_le32(table); in ice_set_dflt_vsi_ctx()
1008 * ice_vsi_setup_q_map - Setup a VSI queue map
1016 u16 qcount_tx = vsi->alloc_txq; in ice_vsi_setup_q_map()
1017 u16 qcount_rx = vsi->alloc_rxq; in ice_vsi_setup_q_map()
1021 if (!vsi->tc_cfg.numtc) { in ice_vsi_setup_q_map()
1023 vsi->tc_cfg.numtc = 1; in ice_vsi_setup_q_map()
1024 vsi->tc_cfg.ena_tc = 1; in ice_vsi_setup_q_map()
1027 num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC); in ice_vsi_setup_q_map()
1030 num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc; in ice_vsi_setup_q_map()
1034 /* find the (rounded up) power-of-2 of qcount */ in ice_vsi_setup_q_map()
1040 * queues allocated to TC0. No:of queues is a power-of-2. in ice_vsi_setup_q_map()
1049 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { in ice_vsi_setup_q_map()
1051 vsi->tc_cfg.tc_info[i].qoffset = 0; in ice_vsi_setup_q_map()
1052 vsi->tc_cfg.tc_info[i].qcount_rx = 1; in ice_vsi_setup_q_map()
1053 vsi->tc_cfg.tc_info[i].qcount_tx = 1; in ice_vsi_setup_q_map()
1054 vsi->tc_cfg.tc_info[i].netdev_tc = 0; in ice_vsi_setup_q_map()
1055 ctxt->info.tc_mapping[i] = 0; in ice_vsi_setup_q_map()
1060 vsi->tc_cfg.tc_info[i].qoffset = offset; in ice_vsi_setup_q_map()
1061 vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc; in ice_vsi_setup_q_map()
1062 vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc; in ice_vsi_setup_q_map()
1063 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; in ice_vsi_setup_q_map()
1071 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); in ice_vsi_setup_q_map()
1074 /* if offset is non-zero, means it is calculated correctly based on in ice_vsi_setup_q_map()
1076 * be correct and non-zero because it is based off - VSI's in ice_vsi_setup_q_map()
1085 if (rx_count > vsi->alloc_rxq) { in ice_vsi_setup_q_map()
1086 …dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map()
1087 rx_count, vsi->alloc_rxq); in ice_vsi_setup_q_map()
1088 return -EINVAL; in ice_vsi_setup_q_map()
1091 if (tx_count > vsi->alloc_txq) { in ice_vsi_setup_q_map()
1092 …dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map()
1093 tx_count, vsi->alloc_txq); in ice_vsi_setup_q_map()
1094 return -EINVAL; in ice_vsi_setup_q_map()
1097 vsi->num_txq = tx_count; in ice_vsi_setup_q_map()
1098 vsi->num_rxq = rx_count; in ice_vsi_setup_q_map()
1100 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { in ice_vsi_setup_q_map()
1101 …dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence makin… in ice_vsi_setup_q_map()
1105 vsi->num_txq = vsi->num_rxq; in ice_vsi_setup_q_map()
1109 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); in ice_vsi_setup_q_map()
1114 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); in ice_vsi_setup_q_map()
1115 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); in ice_vsi_setup_q_map()
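
Each tc_mapping entry packs a queue offset and a power-of-2 queue-count exponent into 16 bits; per the ICE_AQ_VSI_TC_Q_* definitions in ice_adminq_cmd.h the offset sits in bits 0-10 and log2(qcount) in bits 11-14 (treat those positions as illustrative here). A standalone sketch of the encoding, including the rounded-up power-of-2 step the comment above refers to:

    #include <stdint.h>

    #define TC_Q_OFFSET_S 0
    #define TC_Q_OFFSET_M 0x7FF         /* assumed: offset in bits 0-10 */
    #define TC_Q_NUM_S    11
    #define TC_Q_NUM_M    (0xF << 11)   /* assumed: log2(qcount) in bits 11-14 */

    /* Rounded-up power-of-2 exponent, like the kernel's order_base_2(). */
    static unsigned int order_base_2(unsigned int n)
    {
        unsigned int pow = 0;

        while ((1U << pow) < n)
            pow++;
        return pow;
    }

    /* e.g. tc_qmap(0, 6): 6 rounds up to 8, pow = 3, qmap = 0x1800 */
    static uint16_t tc_qmap(uint16_t offset, uint16_t qcount)
    {
        uint16_t pow = order_base_2(qcount);

        return ((offset << TC_Q_OFFSET_S) & TC_Q_OFFSET_M) |
               ((pow << TC_Q_NUM_S) & TC_Q_NUM_M);
    }
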
1121 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
1130 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL && in ice_set_fd_vsi_ctx()
1131 vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL) in ice_set_fd_vsi_ctx()
1135 ctxt->info.valid_sections |= cpu_to_le16(val); in ice_set_fd_vsi_ctx()
1143 ctxt->info.fd_options = cpu_to_le16(val); in ice_set_fd_vsi_ctx()
1145 ctxt->info.max_fd_fltr_dedicated = in ice_set_fd_vsi_ctx()
1146 cpu_to_le16(vsi->num_gfltr); in ice_set_fd_vsi_ctx()
1148 ctxt->info.max_fd_fltr_shared = in ice_set_fd_vsi_ctx()
1149 cpu_to_le16(vsi->num_bfltr); in ice_set_fd_vsi_ctx()
1156 ctxt->info.fd_def_q = cpu_to_le16(val); in ice_set_fd_vsi_ctx()
1163 ctxt->info.fd_report_opt = cpu_to_le16(val); in ice_set_fd_vsi_ctx()
1167 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
1177 pf = vsi->back; in ice_set_rss_vsi_ctx()
1180 switch (vsi->type) { in ice_set_rss_vsi_ctx()
1194 ice_vsi_type_str(vsi->type)); in ice_set_rss_vsi_ctx()
1198 ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & in ice_set_rss_vsi_ctx()
1206 struct ice_pf *pf = vsi->back; in ice_chnl_vsi_setup_q_map()
1211 qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix); in ice_chnl_vsi_setup_q_map()
1219 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); in ice_chnl_vsi_setup_q_map()
1220 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); in ice_chnl_vsi_setup_q_map()
1221 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q); in ice_chnl_vsi_setup_q_map()
1222 ctxt->info.q_mapping[1] = cpu_to_le16(qcount); in ice_chnl_vsi_setup_q_map()
1226 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
1233 return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; in ice_vsi_is_vlan_pruning_ena()
1237 * ice_vsi_init - Create and initialize a VSI
1249 struct ice_pf *pf = vsi->back; in ice_vsi_init()
1250 struct ice_hw *hw = &pf->hw; in ice_vsi_init()
1258 return -ENOMEM; in ice_vsi_init()
1260 switch (vsi->type) { in ice_vsi_init()
1264 ctxt->flags = ICE_AQ_VSI_TYPE_PF; in ice_vsi_init()
1268 ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2; in ice_vsi_init()
1271 ctxt->flags = ICE_AQ_VSI_TYPE_VF; in ice_vsi_init()
1272 /* VF number here is the absolute VF number (0-255) */ in ice_vsi_init()
1273 ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id; in ice_vsi_init()
1276 ret = -ENODEV; in ice_vsi_init()
1283 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_init()
1288 ctxt->info.sw_flags2 |= in ice_vsi_init()
1291 ctxt->info.sw_flags2 &= in ice_vsi_init()
1296 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) in ice_vsi_init()
1299 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) in ice_vsi_init()
1300 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; in ice_vsi_init()
1303 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) && in ice_vsi_init()
1304 vsi->type != ICE_VSI_CTRL) { in ice_vsi_init()
1310 ctxt->info.valid_sections |= in ice_vsi_init()
1314 ctxt->info.sw_id = vsi->port_info->sw_id; in ice_vsi_init()
1315 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_init()
1327 ctxt->info.valid_sections |= in ice_vsi_init()
1332 if (vsi->type == ICE_VSI_PF) { in ice_vsi_init()
1333 ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; in ice_vsi_init()
1334 ctxt->info.valid_sections |= in ice_vsi_init()
1339 ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_init()
1342 ret = -EIO; in ice_vsi_init()
1346 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_init()
1349 ret = -EIO; in ice_vsi_init()
1355 vsi->info = ctxt->info; in ice_vsi_init()
1358 vsi->vsi_num = ctxt->vsi_num; in ice_vsi_init()
1366 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
1367 * @vsi: the VSI having rings deallocated
1374 if (vsi->q_vectors) { in ice_vsi_clear_rings()
1376 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_clear_rings()
1379 q_vector->tx.tx_ring = NULL; in ice_vsi_clear_rings()
1380 q_vector->rx.rx_ring = NULL; in ice_vsi_clear_rings()
1385 if (vsi->tx_rings) { in ice_vsi_clear_rings()
1387 if (vsi->tx_rings[i]) { in ice_vsi_clear_rings()
1388 kfree_rcu(vsi->tx_rings[i], rcu); in ice_vsi_clear_rings()
1389 WRITE_ONCE(vsi->tx_rings[i], NULL); in ice_vsi_clear_rings()
1393 if (vsi->rx_rings) { in ice_vsi_clear_rings()
1395 if (vsi->rx_rings[i]) { in ice_vsi_clear_rings()
1396 kfree_rcu(vsi->rx_rings[i], rcu); in ice_vsi_clear_rings()
1397 WRITE_ONCE(vsi->rx_rings[i], NULL); in ice_vsi_clear_rings()
1404 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
1405 * @vsi: VSI which is having rings allocated
1409 bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw); in ice_vsi_alloc_rings()
1410 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_rings()
1415 /* Allocate Tx rings */ in ice_vsi_alloc_rings()
1425 ring->q_index = i; in ice_vsi_alloc_rings()
1426 ring->reg_idx = vsi->txq_map[i]; in ice_vsi_alloc_rings()
1427 ring->vsi = vsi; in ice_vsi_alloc_rings()
1428 ring->tx_tstamps = &pf->ptp.port.tx; in ice_vsi_alloc_rings()
1429 ring->dev = dev; in ice_vsi_alloc_rings()
1430 ring->count = vsi->num_tx_desc; in ice_vsi_alloc_rings()
1431 ring->txq_teid = ICE_INVAL_TEID; in ice_vsi_alloc_rings()
1433 ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2; in ice_vsi_alloc_rings()
1435 ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1; in ice_vsi_alloc_rings()
1436 WRITE_ONCE(vsi->tx_rings[i], ring); in ice_vsi_alloc_rings()
1439 /* Allocate Rx rings */ in ice_vsi_alloc_rings()
1448 ring->q_index = i; in ice_vsi_alloc_rings()
1449 ring->reg_idx = vsi->rxq_map[i]; in ice_vsi_alloc_rings()
1450 ring->vsi = vsi; in ice_vsi_alloc_rings()
1451 ring->netdev = vsi->netdev; in ice_vsi_alloc_rings()
1452 ring->dev = dev; in ice_vsi_alloc_rings()
1453 ring->count = vsi->num_rx_desc; in ice_vsi_alloc_rings()
1454 ring->cached_phctime = pf->ptp.cached_phc_time; in ice_vsi_alloc_rings()
1455 WRITE_ONCE(vsi->rx_rings[i], ring); in ice_vsi_alloc_rings()
1462 return -ENOMEM; in ice_vsi_alloc_rings()
1466 * ice_vsi_manage_rss_lut - disable/enable RSS
1478 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in ice_vsi_manage_rss_lut()
1483 if (vsi->rss_lut_user) in ice_vsi_manage_rss_lut()
1484 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in ice_vsi_manage_rss_lut()
1486 ice_fill_rss_lut(lut, vsi->rss_table_size, in ice_vsi_manage_rss_lut()
1487 vsi->rss_size); in ice_vsi_manage_rss_lut()
1490 ice_set_rss_lut(vsi, lut, vsi->rss_table_size); in ice_vsi_manage_rss_lut()
1495 * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
1505 vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS; in ice_vsi_cfg_crc_strip()
1507 vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS; in ice_vsi_cfg_crc_strip()
1511 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
1516 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_rss_lut_key()
1522 if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size && in ice_vsi_cfg_rss_lut_key()
1523 (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) { in ice_vsi_cfg_rss_lut_key()
1524 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size); in ice_vsi_cfg_rss_lut_key()
1526 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq); in ice_vsi_cfg_rss_lut_key()
1530 * orig_rss_size so that when tc-qdisc is deleted, main VSI in ice_vsi_cfg_rss_lut_key()
1532 * to begin with (prior to setup-tc for ADQ config) in ice_vsi_cfg_rss_lut_key()
1534 if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size && in ice_vsi_cfg_rss_lut_key()
1535 vsi->orig_rss_size <= vsi->num_rxq) { in ice_vsi_cfg_rss_lut_key()
1536 vsi->rss_size = vsi->orig_rss_size; in ice_vsi_cfg_rss_lut_key()
1538 vsi->orig_rss_size = 0; in ice_vsi_cfg_rss_lut_key()
1542 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in ice_vsi_cfg_rss_lut_key()
1544 return -ENOMEM; in ice_vsi_cfg_rss_lut_key()
1546 if (vsi->rss_lut_user) in ice_vsi_cfg_rss_lut_key()
1547 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in ice_vsi_cfg_rss_lut_key()
1549 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); in ice_vsi_cfg_rss_lut_key()
1551 err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size); in ice_vsi_cfg_rss_lut_key()
1559 err = -ENOMEM; in ice_vsi_cfg_rss_lut_key()
1563 if (vsi->rss_hkey_user) in ice_vsi_cfg_rss_lut_key()
1564 memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); in ice_vsi_cfg_rss_lut_key()
1579 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
1588 struct ice_pf *pf = vsi->back; in ice_vsi_set_vf_rss_flow_fld()
1594 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n", in ice_vsi_set_vf_rss_flow_fld()
1595 vsi->vsi_num); in ice_vsi_set_vf_rss_flow_fld()
1599 status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA); in ice_vsi_set_vf_rss_flow_fld()
1602 vsi->vsi_num, status); in ice_vsi_set_vf_rss_flow_fld()
1606 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
1618 u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num; in ice_vsi_set_rss_flow_fld()
1619 struct ice_pf *pf = vsi->back; in ice_vsi_set_rss_flow_fld()
1620 struct ice_hw *hw = &pf->hw; in ice_vsi_set_rss_flow_fld()
1626 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n", in ice_vsi_set_rss_flow_fld()
1694 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
1699 if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { in ice_vsi_cfg_frame_size()
1700 vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; in ice_vsi_cfg_frame_size()
1701 vsi->rx_buf_len = ICE_RXBUF_1664; in ice_vsi_cfg_frame_size()
1704 (vsi->netdev->mtu <= ETH_DATA_LEN)) { in ice_vsi_cfg_frame_size()
1705 vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; in ice_vsi_cfg_frame_size()
1706 vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; in ice_vsi_cfg_frame_size()
1709 vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; in ice_vsi_cfg_frame_size()
1710 vsi->rx_buf_len = ICE_RXBUF_3072; in ice_vsi_cfg_frame_size()
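
A compressed sketch of the MTU-to-buffer-size ladder above (constants mirror their kernel names; the real function also has build-time conditions, e.g. a PAGE_SIZE check, that this filtered view doesn't show):

    #define ETH_DATA_LEN   1500
    #define NET_IP_ALIGN   2
    #define ICE_RXBUF_1536 1536
    #define ICE_RXBUF_1664 1664
    #define ICE_RXBUF_3072 3072

    /* Pick the Rx buffer length from the MTU: legacy-rx uses a fixed
     * 1664-byte buffer, a standard-MTU frame fits in (1536 - NET_IP_ALIGN)
     * bytes, and anything bigger falls through to 3k buffers. */
    static int pick_rx_buf_len(int mtu, int legacy_rx)
    {
        if (legacy_rx)
            return ICE_RXBUF_1664;
        if (mtu <= ETH_DATA_LEN)
            return ICE_RXBUF_1536 - NET_IP_ALIGN;
        return ICE_RXBUF_3072;
    }
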
1715 * ice_pf_state_is_nominal - checks the PF for nominal state
1732 if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS)) in ice_pf_state_is_nominal()
1739 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
1745 struct ice_hw *hw = &vsi->back->hw; in ice_update_eth_stats()
1746 struct ice_pf *pf = vsi->back; in ice_update_eth_stats()
1747 u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ in ice_update_eth_stats()
1749 prev_es = &vsi->eth_stats_prev; in ice_update_eth_stats()
1750 cur_es = &vsi->eth_stats; in ice_update_eth_stats()
1752 if (ice_is_reset_in_progress(pf->state)) in ice_update_eth_stats()
1753 vsi->stat_offsets_loaded = false; in ice_update_eth_stats()
1755 ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1756 &prev_es->rx_bytes, &cur_es->rx_bytes); in ice_update_eth_stats()
1758 ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1759 &prev_es->rx_unicast, &cur_es->rx_unicast); in ice_update_eth_stats()
1761 ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1762 &prev_es->rx_multicast, &cur_es->rx_multicast); in ice_update_eth_stats()
1764 ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1765 &prev_es->rx_broadcast, &cur_es->rx_broadcast); in ice_update_eth_stats()
1767 ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1768 &prev_es->rx_discards, &cur_es->rx_discards); in ice_update_eth_stats()
1770 ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1771 &prev_es->tx_bytes, &cur_es->tx_bytes); in ice_update_eth_stats()
1773 ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1774 &prev_es->tx_unicast, &cur_es->tx_unicast); in ice_update_eth_stats()
1776 ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1777 &prev_es->tx_multicast, &cur_es->tx_multicast); in ice_update_eth_stats()
1779 ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1780 &prev_es->tx_broadcast, &cur_es->tx_broadcast); in ice_update_eth_stats()
1782 ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1783 &prev_es->tx_errors, &cur_es->tx_errors); in ice_update_eth_stats()
1785 vsi->stat_offsets_loaded = true; in ice_update_eth_stats()
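
The prev/cur pairs exist because the GLV_* counters are free-running 40-bit (ice_stat_update40) or 32-bit (ice_stat_update32) registers: each update adds the delta since the last read to a 64-bit software total, handling rollover, and stat_offsets_loaded turns the first read after a reset into a baseline instead of a spurious jump. A sketch of the 40-bit variant:

    #include <stdint.h>

    /* Accumulate a free-running 40-bit hardware counter into a 64-bit
     * total. On the first read (offsets not loaded) the raw value becomes
     * the baseline so stale hardware state isn't counted. */
    static void stat_update40(uint64_t raw, int offsets_loaded,
                              uint64_t *prev, uint64_t *cur)
    {
        raw &= (1ULL << 40) - 1;            /* counter is only 40 bits wide */

        if (!offsets_loaded)
            *prev = raw;
        if (raw >= *prev)
            *cur += raw - *prev;
        else                                /* counter wrapped past 2^40 */
            *cur += (raw + (1ULL << 40)) - *prev;
        *prev = raw;
    }
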
1789 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
1822 if (q_idx >= vsi->num_rxq) in ice_vsi_cfg_single_rxq()
1823 return -EINVAL; in ice_vsi_cfg_single_rxq()
1825 return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]); in ice_vsi_cfg_single_rxq()
1833 if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx]) in ice_vsi_cfg_single_txq()
1834 return -EINVAL; in ice_vsi_cfg_single_txq()
1838 return -ENOMEM; in ice_vsi_cfg_single_txq()
1840 qg_buf->num_txqs = 1; in ice_vsi_cfg_single_txq()
1848 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
1858 if (vsi->type == ICE_VSI_VF) in ice_vsi_cfg_rxqs()
1863 /* set up individual rings */ in ice_vsi_cfg_rxqs()
1865 int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]); in ice_vsi_cfg_rxqs()
1875 * ice_vsi_cfg_txqs - Configure the VSI for Tx
1877 * @rings: Tx ring array to be configured
1884 ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count) in ice_vsi_cfg_txqs() argument
1892 return -ENOMEM; in ice_vsi_cfg_txqs()
1894 qg_buf->num_txqs = 1; in ice_vsi_cfg_txqs()
1897 err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf); in ice_vsi_cfg_txqs()
1908 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
1916 return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq); in ice_vsi_cfg_lan_txqs()
1920 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
1931 ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq); in ice_vsi_cfg_xdp_txqs()
1942 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
1959 * ice_write_intrl - write throttle rate limit to interrupt specific register
1965 struct ice_hw *hw = &q_vector->vsi->back->hw; in ice_write_intrl()
1967 wr32(hw, GLINT_RATE(q_vector->reg_idx), in ice_write_intrl()
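
ice_intrl_usec_to_reg() presumably scales the microsecond limit by the register granularity and sets an enable flag for any nonzero result, with zero meaning rate limiting is off. A hedged sketch (the enable-bit position is an assumption):

    #define INTRL_ENA_BIT 0x40    /* assumed GLINT_RATE enable-flag position */

    /* Convert a rate limit in usecs to a GLINT_RATE value: divide by the
     * register granularity, OR in the enable bit when nonzero. */
    static unsigned int intrl_usec_to_reg(unsigned int usecs, unsigned int gran)
    {
        unsigned int val = usecs / gran;

        return val ? (val | INTRL_ENA_BIT) : 0;
    }
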
1973 switch (rc->type) { in ice_pull_qvec_from_rc()
1975 if (rc->rx_ring) in ice_pull_qvec_from_rc()
1976 return rc->rx_ring->q_vector; in ice_pull_qvec_from_rc()
1979 if (rc->tx_ring) in ice_pull_qvec_from_rc()
1980 return rc->tx_ring->q_vector; in ice_pull_qvec_from_rc()
1990 * __ice_write_itr - write throttle rate to register
1998 struct ice_hw *hw = &q_vector->vsi->back->hw; in __ice_write_itr()
2000 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), in __ice_write_itr()
2005 * ice_write_itr - write throttle rate to queue specific register
2021 * ice_set_q_vector_intrl - set up interrupt rate limiting
2024 * Interrupt rate limiting is local to the vector, not per-queue so we must
2032 if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) { in ice_set_q_vector_intrl()
2041 ice_write_intrl(q_vector, q_vector->intrl); in ice_set_q_vector_intrl()
2046 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
2054 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_msix()
2055 struct ice_hw *hw = &pf->hw; in ice_vsi_cfg_msix()
2060 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_cfg_msix()
2061 u16 reg_idx = q_vector->reg_idx; in ice_vsi_cfg_msix()
2070 * For SR-IOV VF VSIs queue vector index always starts in ice_vsi_cfg_msix()
2076 for (q = 0; q < q_vector->num_ring_tx; q++) { in ice_vsi_cfg_msix()
2078 q_vector->tx.itr_idx); in ice_vsi_cfg_msix()
2082 for (q = 0; q < q_vector->num_ring_rx; q++) { in ice_vsi_cfg_msix()
2084 q_vector->rx.itr_idx); in ice_vsi_cfg_msix()
2091 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
2092 * @vsi: the VSI whose rings are to be enabled
2102 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
2103 * @vsi: the VSI whose rings are to be disabled
2113 * ice_vsi_stop_tx_rings - Disable Tx rings
2117 * @rings: Tx ring array to be stopped
2122 u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count) in ice_vsi_stop_tx_rings() argument
2126 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) in ice_vsi_stop_tx_rings()
2127 return -EINVAL; in ice_vsi_stop_tx_rings()
2133 if (!rings || !rings[q_idx]) in ice_vsi_stop_tx_rings()
2134 return -EINVAL; in ice_vsi_stop_tx_rings()
2136 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); in ice_vsi_stop_tx_rings()
2138 rings[q_idx], &txq_meta); in ice_vsi_stop_tx_rings()
2148 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
2157 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq); in ice_vsi_stop_lan_tx_rings()
2161 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
2166 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq); in ice_vsi_stop_xdp_tx_rings()
2177 struct ice_pf *pf = vsi->back; in ice_vsi_is_rx_queue_active()
2178 struct ice_hw *hw = &pf->hw; in ice_vsi_is_rx_queue_active()
2185 pf_q = vsi->rxq_map[i]; in ice_vsi_is_rx_queue_active()
2196 if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { in ice_vsi_set_tc_cfg()
2197 vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; in ice_vsi_set_tc_cfg()
2198 vsi->tc_cfg.numtc = 1; in ice_vsi_set_tc_cfg()
2207 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
2216 struct ice_pf *pf = vsi->back; in ice_cfg_sw_lldp()
2227 if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) { in ice_cfg_sw_lldp()
2228 status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num, in ice_cfg_sw_lldp()
2239 vsi->vsi_num, status); in ice_cfg_sw_lldp()
2243 * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it
2251 struct device *dev = ice_pf_to_dev(vsi->back); in ice_set_agg_vsi()
2257 struct ice_pf *pf = vsi->back; in ice_set_agg_vsi()
2263 * - PF aggregator node to contains VSIs of type _PF and _CTRL in ice_set_agg_vsi()
2264 * - VF aggregator nodes will contain VF VSI in ice_set_agg_vsi()
2266 port_info = pf->hw.port_info; in ice_set_agg_vsi()
2270 switch (vsi->type) { in ice_set_agg_vsi()
2278 agg_node_iter = &pf->pf_agg_node[0]; in ice_set_agg_vsi()
2289 agg_node_iter = &pf->vf_agg_node[0]; in ice_set_agg_vsi()
2294 ice_vsi_type_str(vsi->type)); in ice_set_agg_vsi()
2303 if (agg_node_iter->num_vsis && in ice_set_agg_vsi()
2304 agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { in ice_set_agg_vsi()
2309 if (agg_node_iter->valid && in ice_set_agg_vsi()
2310 agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) { in ice_set_agg_vsi()
2311 agg_id = agg_node_iter->agg_id; in ice_set_agg_vsi()
2317 if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) { in ice_set_agg_vsi()
2330 if (!agg_node->valid) { in ice_set_agg_vsi()
2332 (u8)vsi->tc_cfg.ena_tc); in ice_set_agg_vsi()
2339 agg_node->valid = true; in ice_set_agg_vsi()
2340 agg_node->agg_id = agg_id; in ice_set_agg_vsi()
2344 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx, in ice_set_agg_vsi()
2345 (u8)vsi->tc_cfg.ena_tc); in ice_set_agg_vsi()
2348 vsi->idx, agg_id); in ice_set_agg_vsi()
2353 agg_node->num_vsis++; in ice_set_agg_vsi()
2355 /* cache the 'agg_id' in VSI, so that after reset - VSI will be moved in ice_set_agg_vsi()
2358 vsi->agg_node = agg_node; in ice_set_agg_vsi()
2360 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id, in ice_set_agg_vsi()
2361 vsi->agg_node->num_vsis); in ice_set_agg_vsi()
2372 if (!(vsi->tc_cfg.ena_tc & BIT(i))) in ice_vsi_cfg_tc_lan()
2375 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_cfg_tc_lan()
2376 if (!vsi->alloc_txq && vsi->num_txq) in ice_vsi_cfg_tc_lan()
2377 max_txqs[i] = vsi->num_txq; in ice_vsi_cfg_tc_lan()
2379 max_txqs[i] = pf->num_lan_tx; in ice_vsi_cfg_tc_lan()
2381 max_txqs[i] = vsi->alloc_txq; in ice_vsi_cfg_tc_lan()
2384 if (vsi->type == ICE_VSI_PF) in ice_vsi_cfg_tc_lan()
2385 max_txqs[i] += vsi->num_xdp_txq; in ice_vsi_cfg_tc_lan()
2388 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); in ice_vsi_cfg_tc_lan()
2389 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, in ice_vsi_cfg_tc_lan()
2393 vsi->vsi_num, ret); in ice_vsi_cfg_tc_lan()
2401 * ice_vsi_cfg_def - configure default VSI based on the type
2408 struct device *dev = ice_pf_to_dev(vsi->back); in ice_vsi_cfg_def()
2409 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_def()
2412 vsi->vsw = pf->first_sw; in ice_vsi_cfg_def()
2414 ret = ice_vsi_alloc_def(vsi, params->ch); in ice_vsi_cfg_def()
2427 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", in ice_vsi_cfg_def()
2428 vsi->idx); in ice_vsi_cfg_def()
2439 ret = ice_vsi_init(vsi, params->flags); in ice_vsi_cfg_def()
2445 switch (vsi->type) { in ice_vsi_cfg_def()
2462 vsi->stat_offsets_loaded = false; in ice_vsi_cfg_def()
2468 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog, in ice_vsi_cfg_def()
2475 if (vsi->type != ICE_VSI_CTRL) in ice_vsi_cfg_def()
2480 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_cfg_def()
2487 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_cfg_def()
2510 vsi->stat_offsets_loaded = false; in ice_vsi_cfg_def()
2516 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_cfg_def()
2533 ret = -EINVAL; in ice_vsi_cfg_def()
2555 * ice_vsi_cfg - configure a previously allocated VSI
2561 struct ice_pf *pf = vsi->back; in ice_vsi_cfg()
2564 if (WARN_ON(params->type == ICE_VSI_VF && !params->vf)) in ice_vsi_cfg()
2565 return -EINVAL; in ice_vsi_cfg()
2567 vsi->type = params->type; in ice_vsi_cfg()
2568 vsi->port_info = params->pi; in ice_vsi_cfg()
2571 vsi->vf = params->vf; in ice_vsi_cfg()
2577 ret = ice_vsi_cfg_tc_lan(vsi->back, vsi); in ice_vsi_cfg()
2581 if (vsi->type == ICE_VSI_CTRL) { in ice_vsi_cfg()
2582 if (vsi->vf) { in ice_vsi_cfg()
2583 WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI); in ice_vsi_cfg()
2584 vsi->vf->ctrl_vsi_idx = vsi->idx; in ice_vsi_cfg()
2586 WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI); in ice_vsi_cfg()
2587 pf->ctrl_vsi_idx = vsi->idx; in ice_vsi_cfg()
2595 * ice_vsi_decfg - remove all VSI configuration
2600 struct ice_pf *pf = vsi->back; in ice_vsi_decfg()
2603 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); in ice_vsi_decfg()
2604 err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); in ice_vsi_decfg()
2607 vsi->vsi_num, err); in ice_vsi_decfg()
2620 /* SR-IOV determines needed MSIX resources all at once instead of per in ice_vsi_decfg()
2622 * many interrupts each VF needs. SR-IOV MSIX resources are also in ice_vsi_decfg()
2626 if (vsi->type == ICE_VSI_VF && in ice_vsi_decfg()
2627 vsi->agg_node && vsi->agg_node->valid) in ice_vsi_decfg()
2628 vsi->agg_node->num_vsis--; in ice_vsi_decfg()
2632 * ice_vsi_setup - Set up a VSI by a given type
2651 if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) || in ice_vsi_setup()
2652 WARN_ON(!params->pi)) in ice_vsi_setup()
2674 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { in ice_vsi_setup()
2680 if (!vsi->agg_node) in ice_vsi_setup()
2692 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
2697 struct ice_pf *pf = vsi->back; in ice_vsi_release_msix()
2698 struct ice_hw *hw = &pf->hw; in ice_vsi_release_msix()
2704 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_release_msix()
2707 for (q = 0; q < q_vector->num_ring_tx; q++) { in ice_vsi_release_msix()
2708 ice_write_itr(&q_vector->tx, 0); in ice_vsi_release_msix()
2709 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); in ice_vsi_release_msix()
2711 u32 xdp_txq = txq + vsi->num_xdp_txq; in ice_vsi_release_msix()
2713 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); in ice_vsi_release_msix()
2718 for (q = 0; q < q_vector->num_ring_rx; q++) { in ice_vsi_release_msix()
2719 ice_write_itr(&q_vector->rx, 0); in ice_vsi_release_msix()
2720 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); in ice_vsi_release_msix()
2729 * ice_vsi_free_irq - Free the IRQ association with the OS
2734 struct ice_pf *pf = vsi->back; in ice_vsi_free_irq()
2737 if (!vsi->q_vectors || !vsi->irqs_ready) in ice_vsi_free_irq()
2741 if (vsi->type == ICE_VSI_VF) in ice_vsi_free_irq()
2744 vsi->irqs_ready = false; in ice_vsi_free_irq()
2750 irq_num = vsi->q_vectors[i]->irq.virq; in ice_vsi_free_irq()
2753 if (!vsi->q_vectors[i] || in ice_vsi_free_irq()
2754 !(vsi->q_vectors[i]->num_ring_tx || in ice_vsi_free_irq()
2755 vsi->q_vectors[i]->num_ring_rx)) in ice_vsi_free_irq()
2765 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); in ice_vsi_free_irq()
2770 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
2777 if (!vsi->tx_rings) in ice_vsi_free_tx_rings()
2781 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in ice_vsi_free_tx_rings()
2782 ice_free_tx_ring(vsi->tx_rings[i]); in ice_vsi_free_tx_rings()
2786 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
2793 if (!vsi->rx_rings) in ice_vsi_free_rx_rings()
2797 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) in ice_vsi_free_rx_rings()
2798 ice_free_rx_ring(vsi->rx_rings[i]); in ice_vsi_free_rx_rings()
2802 * ice_vsi_close - Shut down a VSI
2807 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) in ice_vsi_close()
2816 * ice_ena_vsi - resume a VSI
2824 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state)) in ice_ena_vsi()
2827 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_ena_vsi()
2829 if (vsi->netdev && vsi->type == ICE_VSI_PF) { in ice_ena_vsi()
2830 if (netif_running(vsi->netdev)) { in ice_ena_vsi()
2834 err = ice_open_internal(vsi->netdev); in ice_ena_vsi()
2839 } else if (vsi->type == ICE_VSI_CTRL) { in ice_ena_vsi()
2847 * ice_dis_vsi - pause a VSI
2853 if (test_bit(ICE_VSI_DOWN, vsi->state)) in ice_dis_vsi()
2856 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_dis_vsi()
2858 if (vsi->type == ICE_VSI_PF && vsi->netdev) { in ice_dis_vsi()
2859 if (netif_running(vsi->netdev)) { in ice_dis_vsi()
2870 } else if (vsi->type == ICE_VSI_CTRL || in ice_dis_vsi()
2871 vsi->type == ICE_VSI_SWITCHDEV_CTRL) { in ice_dis_vsi()
2877 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
2878 * @vsi: the VSI being un-configured
2882 struct ice_pf *pf = vsi->back; in ice_vsi_dis_irq()
2883 struct ice_hw *hw = &pf->hw; in ice_vsi_dis_irq()
2888 if (vsi->tx_rings) { in ice_vsi_dis_irq()
2890 if (vsi->tx_rings[i]) { in ice_vsi_dis_irq()
2893 reg = vsi->tx_rings[i]->reg_idx; in ice_vsi_dis_irq()
2901 if (vsi->rx_rings) { in ice_vsi_dis_irq()
2903 if (vsi->rx_rings[i]) { in ice_vsi_dis_irq()
2906 reg = vsi->rx_rings[i]->reg_idx; in ice_vsi_dis_irq()
2916 if (!vsi->q_vectors[i]) in ice_vsi_dis_irq()
2918 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); in ice_vsi_dis_irq()
2924 if (vsi->type == ICE_VSI_VF) in ice_vsi_dis_irq()
2928 synchronize_irq(vsi->q_vectors[i]->irq.virq); in ice_vsi_dis_irq()
2932 * ice_vsi_release - Delete a VSI and free its resources
2941 if (!vsi->back) in ice_vsi_release()
2942 return -ENODEV; in ice_vsi_release()
2943 pf = vsi->back; in ice_vsi_release()
2945 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) in ice_vsi_release()
2953 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && in ice_vsi_release()
2954 !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) in ice_vsi_release()
2963 if (!ice_is_reset_in_progress(pf->state)) in ice_vsi_release()
2970 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
2983 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_rebuild_get_coalesce()
2985 coalesce[i].itr_tx = q_vector->tx.itr_settings; in ice_vsi_rebuild_get_coalesce()
2986 coalesce[i].itr_rx = q_vector->rx.itr_settings; in ice_vsi_rebuild_get_coalesce()
2987 coalesce[i].intrl = q_vector->intrl; in ice_vsi_rebuild_get_coalesce()
2989 if (i < vsi->num_txq) in ice_vsi_rebuild_get_coalesce()
2991 if (i < vsi->num_rxq) in ice_vsi_rebuild_get_coalesce()
2995 return vsi->num_q_vectors; in ice_vsi_rebuild_get_coalesce()
2999 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
3020 * the number of Tx or Rx rings changes (the first for loop) in ice_vsi_rebuild_set_coalesce()
3024 for (i = 0; i < size && i < vsi->num_q_vectors; i++) { in ice_vsi_rebuild_set_coalesce()
3028 * and the loop variable is less than the number of rings in ice_vsi_rebuild_set_coalesce()
3032 * rings is less than are allocated (this means the number of in ice_vsi_rebuild_set_coalesce()
3033 * rings increased from previously), then write out the in ice_vsi_rebuild_set_coalesce()
3040 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) { in ice_vsi_rebuild_set_coalesce()
3041 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
3042 rc->itr_settings = coalesce[i].itr_rx; in ice_vsi_rebuild_set_coalesce()
3043 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3044 } else if (i < vsi->alloc_rxq) { in ice_vsi_rebuild_set_coalesce()
3045 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
3046 rc->itr_settings = coalesce[0].itr_rx; in ice_vsi_rebuild_set_coalesce()
3047 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3050 if (i < vsi->alloc_txq && coalesce[i].tx_valid) { in ice_vsi_rebuild_set_coalesce()
3051 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
3052 rc->itr_settings = coalesce[i].itr_tx; in ice_vsi_rebuild_set_coalesce()
3053 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3054 } else if (i < vsi->alloc_txq) { in ice_vsi_rebuild_set_coalesce()
3055 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
3056 rc->itr_settings = coalesce[0].itr_tx; in ice_vsi_rebuild_set_coalesce()
3057 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3060 vsi->q_vectors[i]->intrl = coalesce[i].intrl; in ice_vsi_rebuild_set_coalesce()
3061 ice_set_q_vector_intrl(vsi->q_vectors[i]); in ice_vsi_rebuild_set_coalesce()
3067 for (; i < vsi->num_q_vectors; i++) { in ice_vsi_rebuild_set_coalesce()
3069 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
3070 rc->itr_settings = coalesce[0].itr_tx; in ice_vsi_rebuild_set_coalesce()
3071 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3074 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
3075 rc->itr_settings = coalesce[0].itr_rx; in ice_vsi_rebuild_set_coalesce()
3076 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3078 vsi->q_vectors[i]->intrl = coalesce[0].intrl; in ice_vsi_rebuild_set_coalesce()
3079 ice_set_q_vector_intrl(vsi->q_vectors[i]); in ice_vsi_rebuild_set_coalesce()
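
Compressed, the restore policy above is: a vector whose index falls inside the saved range keeps its own ITR settings wherever the per-ring valid flag is set, and everything else (invalid entries, plus vectors added by a resize) inherits vector 0's settings. A sketch of that decision, ignoring the alloc_rxq/alloc_txq bounds checks:

    struct coal {
        unsigned int itr_tx, itr_rx, intrl;
        int tx_valid, rx_valid;
    };

    /* Settings for vector i after a rebuild: its own saved values when
     * present and valid, vector 0's otherwise. */
    static struct coal coal_for_vector(const struct coal *saved, int size, int i)
    {
        struct coal c = saved[0];           /* fallback: vector 0 */

        if (i < size) {
            if (saved[i].tx_valid)
                c.itr_tx = saved[i].itr_tx;
            if (saved[i].rx_valid)
                c.itr_rx = saved[i].itr_rx;
            c.intrl = saved[i].intrl;
        }
        return c;
    }
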
3084 * ice_vsi_realloc_stat_arrays - Frees unused stat structures or alloc new ones
3090 u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq; in ice_vsi_realloc_stat_arrays()
3091 u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq; in ice_vsi_realloc_stat_arrays()
3095 struct ice_pf *pf = vsi->back; in ice_vsi_realloc_stat_arrays()
3096 u16 prev_txq = vsi->alloc_txq; in ice_vsi_realloc_stat_arrays()
3097 u16 prev_rxq = vsi->alloc_rxq; in ice_vsi_realloc_stat_arrays()
3100 vsi_stat = pf->vsi_stats[vsi->idx]; in ice_vsi_realloc_stat_arrays()
3104 if (vsi_stat->tx_ring_stats[i]) { in ice_vsi_realloc_stat_arrays()
3105 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); in ice_vsi_realloc_stat_arrays()
3106 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); in ice_vsi_realloc_stat_arrays()
3111 tx_ring_stats = vsi_stat->tx_ring_stats; in ice_vsi_realloc_stat_arrays()
3112 vsi_stat->tx_ring_stats = in ice_vsi_realloc_stat_arrays()
3113 krealloc_array(vsi_stat->tx_ring_stats, req_txq, in ice_vsi_realloc_stat_arrays()
3114 sizeof(*vsi_stat->tx_ring_stats), in ice_vsi_realloc_stat_arrays()
3116 if (!vsi_stat->tx_ring_stats) { in ice_vsi_realloc_stat_arrays()
3117 vsi_stat->tx_ring_stats = tx_ring_stats; in ice_vsi_realloc_stat_arrays()
3118 return -ENOMEM; in ice_vsi_realloc_stat_arrays()
3123 if (vsi_stat->rx_ring_stats[i]) { in ice_vsi_realloc_stat_arrays()
3124 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); in ice_vsi_realloc_stat_arrays()
3125 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); in ice_vsi_realloc_stat_arrays()
3130 rx_ring_stats = vsi_stat->rx_ring_stats; in ice_vsi_realloc_stat_arrays()
3131 vsi_stat->rx_ring_stats = in ice_vsi_realloc_stat_arrays()
3132 krealloc_array(vsi_stat->rx_ring_stats, req_rxq, in ice_vsi_realloc_stat_arrays()
3133 sizeof(*vsi_stat->rx_ring_stats), in ice_vsi_realloc_stat_arrays()
3135 if (!vsi_stat->rx_ring_stats) { in ice_vsi_realloc_stat_arrays()
3136 vsi_stat->rx_ring_stats = rx_ring_stats; in ice_vsi_realloc_stat_arrays()
3137 return -ENOMEM; in ice_vsi_realloc_stat_arrays()
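
Note the temporary tx_ring_stats/rx_ring_stats copies around each krealloc_array() above: on failure krealloc returns NULL but leaves the original buffer allocated, so assigning the result straight back would leak it. The same idiom in plain C:

    #include <stdlib.h>

    /* Grow an array in place without losing it on allocation failure. */
    static int grow_array(void ***arrp, size_t new_n, size_t elem_size)
    {
        void **prev = *arrp;

        *arrp = realloc(*arrp, new_n * elem_size);
        if (!*arrp) {
            *arrp = prev;   /* realloc failed; original is untouched */
            return -1;      /* -ENOMEM in the kernel version */
        }
        return 0;
    }
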
3144 * ice_vsi_rebuild - Rebuild VSI after reset
3162 return -EINVAL; in ice_vsi_rebuild()
3167 pf = vsi->back; in ice_vsi_rebuild()
3168 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) in ice_vsi_rebuild()
3169 return -EINVAL; in ice_vsi_rebuild()
3171 mutex_lock(&vsi->xdp_state_lock); in ice_vsi_rebuild()
3182 coalesce = kcalloc(vsi->num_q_vectors, in ice_vsi_rebuild()
3185 ret = -ENOMEM; in ice_vsi_rebuild()
3194 ret = -EIO; in ice_vsi_rebuild()
3203 clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state); in ice_vsi_rebuild()
3211 mutex_unlock(&vsi->xdp_state_lock); in ice_vsi_rebuild()
3216 * ice_is_reset_in_progress - check for a reset in progress
3228 * ice_wait_for_reset - Wait for driver to finish reset and rebuild
3237 * Returns 0 on success, -EBUSY if the reset is not finished within the
3238 * timeout, and -ERESTARTSYS if the thread was interrupted.
3244 ret = wait_event_interruptible_timeout(pf->reset_wait_queue, in ice_wait_for_reset()
3245 !ice_is_reset_in_progress(pf->state), in ice_wait_for_reset()
3250 return -EBUSY; in ice_wait_for_reset()
3256 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
3262 vsi->info.mapping_flags = ctx->info.mapping_flags; in ice_vsi_update_q_map()
3263 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, in ice_vsi_update_q_map()
3264 sizeof(vsi->info.q_mapping)); in ice_vsi_update_q_map()
3265 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, in ice_vsi_update_q_map()
3266 sizeof(vsi->info.tc_mapping)); in ice_vsi_update_q_map()
3270 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
3276 struct net_device *netdev = vsi->netdev; in ice_vsi_cfg_netdev_tc()
3277 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_netdev_tc()
3278 int numtc = vsi->tc_cfg.numtc; in ice_vsi_cfg_netdev_tc()
3287 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_cfg_netdev_tc()
3295 if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf)) in ice_vsi_cfg_netdev_tc()
3296 numtc = vsi->all_numtc; in ice_vsi_cfg_netdev_tc()
3301 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; in ice_vsi_cfg_netdev_tc()
3304 if (vsi->tc_cfg.ena_tc & BIT(i)) in ice_vsi_cfg_netdev_tc()
3306 vsi->tc_cfg.tc_info[i].netdev_tc, in ice_vsi_cfg_netdev_tc()
3307 vsi->tc_cfg.tc_info[i].qcount_tx, in ice_vsi_cfg_netdev_tc()
3308 vsi->tc_cfg.tc_info[i].qoffset); in ice_vsi_cfg_netdev_tc()
3311 if (!(vsi->all_enatc & BIT(i))) in ice_vsi_cfg_netdev_tc()
3313 if (!vsi->mqprio_qopt.qopt.count[i]) in ice_vsi_cfg_netdev_tc()
3316 vsi->mqprio_qopt.qopt.count[i], in ice_vsi_cfg_netdev_tc()
3317 vsi->mqprio_qopt.qopt.offset[i]); in ice_vsi_cfg_netdev_tc()
3320 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_vsi_cfg_netdev_tc()
3324 u8 ets_tc = dcbcfg->etscfg.prio_table[i]; in ice_vsi_cfg_netdev_tc()
3327 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; in ice_vsi_cfg_netdev_tc()
/**
 * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 * @ena_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 */
static int
ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
			   u8 ena_tc)
{
	u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
	u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
	int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
	u16 new_txq, new_rxq;
	u8 netdev_tc = 0;
	int i;

	vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;

	pow = order_base_2(tc0_qcount);
	qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
		ICE_AQ_VSI_TC_Q_OFFSET_M) |
	       ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);

	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		offset = vsi->mqprio_qopt.qopt.offset[i];
		qcount_rx = vsi->mqprio_qopt.qopt.count[i];
		qcount_tx = vsi->mqprio_qopt.qopt.count[i];
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
		vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
	}

	if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) {
		ice_for_each_chnl_tc(i) {
			if (!(vsi->all_enatc & BIT(i)))
				continue;
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
		}
	}

	new_txq = offset + qcount_tx;
	if (new_txq > vsi->alloc_txq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
			new_txq, vsi->alloc_txq);
		return -EINVAL;
	}

	new_rxq = offset + qcount_rx;
	if (new_rxq > vsi->alloc_rxq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
			new_rxq, vsi->alloc_rxq);
		return -EINVAL;
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_txq = new_txq;
	vsi->num_rxq = new_rxq;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount);

	/* Find queue count available for channel VSIs and the starting
	 * offset for channel VSIs
	 */
	if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
		vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
		vsi->next_base_q = tc0_qcount;
	}
	dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq);
	dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
	dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
		vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);

	return 0;
}
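/* Editor's sketch (worked example, values assumed): for an mqprio config
 * with count = {4, 2, 2} and offset = {0, 4, 6}, TC0 owns queues 0-3, so
 * tc0_qcount = 4 and the contiguous qmap advertises a power-of-two region
 * of 4 queues at offset 0. Queues 4-7 are left for the channel (ADQ) TCs,
 * giving vsi->next_base_q = 4 and vsi->cnt_q_avail = num_rxq - 4.
 */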
/**
 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
 * @vsi: VSI to be configured
 * @ena_tc: TC bitmap
 *
 * VSI queues are expected to be quiesced before calling this function.
 */
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_pf *pf = vsi->back;
	struct ice_tc_cfg old_tc_cfg;
	struct ice_vsi_ctx *ctx;
	struct device *dev;
	int i, ret = 0;
	u8 num_tc = 0;

	dev = ice_pf_to_dev(pf);
	if (vsi->tc_cfg.ena_tc == ena_tc &&
	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	ice_for_each_traffic_class(i) {
		/* build bitmap of enabled TCs */
		if (ena_tc & BIT(i))
			num_tc++;
		/* populate max_txqs per TC */
		max_txqs[i] = vsi->alloc_txq;
		/* CHNL VSIs have alloc_txq of zero, so use num_txq as the
		 * per-TC maximum instead
		 */
		if (vsi->type == ICE_VSI_CHNL &&
		    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
			max_txqs[i] = vsi->num_txq;
	}

	memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
	vsi->tc_cfg.ena_tc = ena_tc;
	vsi->tc_cfg.numtc = num_tc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->vf_num = 0;
	ctx->info = vsi->info;

	if (vsi->type == ICE_VSI_PF &&
	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
	else
		ret = ice_vsi_setup_q_map(vsi, ctx);

	if (ret) {
		memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
		goto out;
	}

	/* indicate which section of the VSI context is being modified */
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
	if (ret) {
		dev_info(dev, "Failed VSI Update\n");
		goto out;
	}

	if (vsi->type == ICE_VSI_PF &&
	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
	else
		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
				      vsi->tc_cfg.ena_tc, max_txqs);
	if (ret) {
		dev_err(dev, "VSI %d failed TC config, error %d\n",
			vsi->vsi_num, ret);
		goto out;
	}
	ice_vsi_update_q_map(vsi, ctx);
	vsi->info.valid_sections = 0;

	ice_vsi_cfg_netdev_tc(vsi, ena_tc);
out:
	kfree(ctx);
	return ret;
}
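/* Editor's sketch (not part of the driver): how a DCB event handler might
 * apply a new TC map. The helper name and the surrounding locking are
 * assumptions; the real flow lives in ice_dcb_lib.c.
 */
static void example_apply_tc_map(struct ice_vsi *vsi, u8 ena_tc)
{
	if (ena_tc == vsi->tc_cfg.ena_tc)
		return;

	/* queues are expected to be quiesced before reconfiguring TCs */
	if (ice_vsi_cfg_tc(vsi, ena_tc))
		dev_err(ice_pf_to_dev(vsi->back),
			"Failed to config TC map 0x%x for VSI %d\n",
			ena_tc, vsi->vsi_num);
}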
/**
 * ice_update_ring_stats - Update ring statistics
 * @stats: stats to be updated
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 *
 * This function assumes that caller has acquired a u64_stats_sync lock.
 */
static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
{
	stats->bytes += bytes;
	stats->pkts += pkts;
}
/**
 * ice_update_tx_ring_stats - Update Tx ring specific counters
 * @tx_ring: ring to update
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 */
void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&tx_ring->ring_stats->syncp);
	ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
	u64_stats_update_end(&tx_ring->ring_stats->syncp);
}
/**
 * ice_update_rx_ring_stats - Update Rx ring specific counters
 * @rx_ring: ring to update
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 */
void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&rx_ring->ring_stats->syncp);
	ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
	u64_stats_update_end(&rx_ring->ring_stats->syncp);
}
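/* Editor's sketch (not part of the driver): the writer side above pairs
 * with a seqcount-protected reader. A hypothetical stats reader retries
 * until it observes a consistent snapshot of the 64-bit counters:
 */
static void example_read_rx_stats(struct ice_rx_ring *rx_ring,
				  u64 *pkts, u64 *bytes)
{
	struct ice_ring_stats *rs = rx_ring->ring_stats;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&rs->syncp);
		*pkts = rs->stats.pkts;
		*bytes = rs->stats.bytes;
	} while (u64_stats_fetch_retry(&rs->syncp, start));
}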
/**
 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
 * @pi: port info of the switch with default VSI
 *
 * Return true if there is a single VSI in the default forwarding rule list.
 */
bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
{
	bool exists = false;

	ice_check_if_dflt_vsi(pi, 0, &exists);
	return exists;
}

/**
 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
 * @vsi: VSI to compare against default forwarding VSI
 *
 * If the VSI passed in is the default forwarding VSI then return true, else
 * return false.
 */
bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
{
	return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
}
/**
 * ice_set_dflt_vsi - set the default forwarding VSI
 * @vsi: VSI getting set as the default forwarding VSI on the switch
 *
 * If the VSI passed in is already the default VSI just return success.
 * Otherwise try to set the VSI passed in as the switch's default VSI and
 * return the result.
 */
int ice_set_dflt_vsi(struct ice_vsi *vsi)
{
	struct device *dev;
	int status;

	if (!vsi)
		return -EINVAL;

	dev = ice_pf_to_dev(vsi->back);

	if (ice_lag_is_switchdev_running(vsi->back)) {
		dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n",
			vsi->vsi_num);
		return 0;
	}

	/* the VSI passed in is already the default VSI */
	if (ice_is_vsi_dflt_vsi(vsi)) {
		dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
			vsi->vsi_num);
		return 0;
	}

	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
	if (status) {
		dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
			vsi->vsi_num, status);
		return status;
	}

	return 0;
}
/**
 * ice_clear_dflt_vsi - clear the default forwarding VSI
 * @vsi: VSI to remove from filter list
 *
 * If the switch has no default VSI then return an error, otherwise try to
 * clear the default VSI and return the result.
 */
int ice_clear_dflt_vsi(struct ice_vsi *vsi)
{
	struct device *dev;
	int status;

	if (!vsi)
		return -EINVAL;

	dev = ice_pf_to_dev(vsi->back);

	/* there is no default VSI currently */
	if (!ice_is_dflt_vsi_in_use(vsi->port_info))
		return -ENODEV;

	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
				  ICE_FLTR_RX);
	if (status) {
		dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
			vsi->vsi_num, status);
		return -EIO;
	}

	return 0;
}
/**
 * ice_get_link_speed_mbps - get link speed in Mbps
 * @vsi: the VSI whose link speed is being queried
 *
 * Return current VSI link speed and 0 if the speed is unknown.
 */
int ice_get_link_speed_mbps(struct ice_vsi *vsi)
{
	unsigned int link_speed;

	link_speed = vsi->port_info->phy.link_info.link_speed;

	return (int)ice_get_link_speed(fls(link_speed) - 1);
}
/**
 * ice_get_link_speed_kbps - get link speed in Kbps
 * @vsi: the VSI whose link speed is being queried
 *
 * Return current VSI link speed and 0 if the speed is unknown.
 */
int ice_get_link_speed_kbps(struct ice_vsi *vsi)
{
	int speed_mbps;

	speed_mbps = ice_get_link_speed_mbps(vsi);

	return speed_mbps * 1000;
}
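/* Editor's sketch (not part of the driver): link_speed is a one-hot
 * ICE_AQ_LINK_SPEED_* bitmap, so fls() - 1 turns the set bit into a table
 * index for ice_get_link_speed(). The bit position used below is purely
 * illustrative.
 */
static int example_speed_lookup(void)
{
	u16 link_speed = BIT(7);	/* hypothetical speed-class bit */
	int idx = fls(link_speed) - 1;	/* idx == 7 */

	return ice_get_link_speed(idx);	/* Mbps from the lookup table */
}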
/**
 * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
 * @vsi: VSI to be configured
 * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
 *
 * If the min_tx_rate is set as 0 the BW limit will be reset to the default
 * profile, otherwise a non-zero value will force a minimum BW limit for the VSI
 * on TC 0.
 */
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;
	int speed;

	dev = ice_pf_to_dev(pf);
	if (!vsi->port_info) {
		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
			vsi->idx, vsi->type);
		return -EINVAL;
	}

	speed = ice_get_link_speed_kbps(vsi);
	if (min_tx_rate > (u64)speed) {
		dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
			min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
			speed);
		return -EINVAL;
	}

	/* Configure min BW for VSI limit */
	if (min_tx_rate) {
		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
						   ICE_MIN_BW, min_tx_rate);
		if (status) {
			dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
				min_tx_rate, ice_vsi_type_str(vsi->type),
				vsi->idx);
			return status;
		}

		dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
			min_tx_rate, ice_vsi_type_str(vsi->type));
	} else {
		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
							vsi->idx, 0,
							ICE_MIN_BW);
		if (status) {
			dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
				ice_vsi_type_str(vsi->type), vsi->idx);
			return status;
		}

		dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
			ice_vsi_type_str(vsi->type), vsi->idx);
	}

	return 0;
}
/**
 * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
 * @vsi: VSI to be configured
 * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
 *
 * If the max_tx_rate is set as 0 the BW limit will be reset to the default
 * profile, otherwise a non-zero value will force a maximum BW limit for the VSI
 * on TC 0.
 */
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;
	int speed;

	dev = ice_pf_to_dev(pf);
	if (!vsi->port_info) {
		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
			vsi->idx, vsi->type);
		return -EINVAL;
	}

	speed = ice_get_link_speed_kbps(vsi);
	if (max_tx_rate > (u64)speed) {
		dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
			speed);
		return -EINVAL;
	}

	/* Configure max BW for VSI limit */
	if (max_tx_rate) {
		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
						   ICE_MAX_BW, max_tx_rate);
		if (status) {
			dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
				max_tx_rate, ice_vsi_type_str(vsi->type),
				vsi->idx);
			return status;
		}

		dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
	} else {
		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
							vsi->idx, 0,
							ICE_MAX_BW);
		if (status) {
			dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
				ice_vsi_type_str(vsi->type), vsi->idx);
			return status;
		}

		dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
			ice_vsi_type_str(vsi->type), vsi->idx);
	}

	return 0;
}
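/* Editor's sketch (not part of the driver): a tc-offload style caller
 * converting user rates from bytes/s to the Kbps units the scheduler
 * expects before applying both bounds; the conversion convention here is
 * an assumption for illustration.
 */
static int example_set_tx_rates(struct ice_vsi *vsi, u64 min_bps, u64 max_bps)
{
	int err;

	/* scheduler rates are programmed in Kbps */
	err = ice_set_min_bw_limit(vsi, div_u64(min_bps * 8, 1000));
	if (err)
		return err;

	return ice_set_max_bw_limit(vsi, div_u64(max_bps * 8, 1000));
}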
/**
 * ice_set_link - turn on/off physical link
 * @vsi: VSI to modify physical link on
 * @ena: turn on/off physical link
 */
int ice_set_link(struct ice_vsi *vsi, bool ena)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_port_info *pi = vsi->port_info;
	struct ice_hw *hw = pi->hw;
	int status;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	status = ice_aq_set_link_restart_an(pi, ena, NULL);

	/* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
	 * This is not a fatal error, so print a warning message and return
	 * a success code. Return an error if FW returns an error code other
	 * than ICE_AQ_RC_EMODE
	 */
	if (status == -EIO) {
		if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
			dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
				(ena ? "ON" : "OFF"), status,
				ice_aq_str(hw->adminq.sq_last_status));
	} else if (status) {
		dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
			(ena ? "ON" : "OFF"), status,
			ice_aq_str(hw->adminq.sq_last_status));
		return status;
	}

	return 0;
}
/**
 * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
 * @vsi: VSI used to add VLAN filters
 *
 * In Double VLAN Mode (DVM), single VLAN filters are based on the inner VLAN
 * ID, so a VLAN 0 filter is needed per supported outer TPID; in Single VLAN
 * Mode (SVM) the two filters would be identical, so only one is added.
 */
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct ice_vlan vlan;
	int err;

	vlan = ICE_VLAN(0, 0, 0);
	err = vlan_ops->add_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* in SVM both VLAN 0 filters are identical */
	if (!ice_is_dvm_ena(&vsi->back->hw))
		return 0;

	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
	err = vlan_ops->add_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	return 0;
}
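/* Editor's sketch (worked example): in DVM the two VLAN 0 filters differ
 * only by TPID -- ICE_VLAN(0, 0, 0) matches untagged/priority-tagged frames
 * regardless of TPID, while ICE_VLAN(ETH_P_8021Q, 0, 0) pins the 0x8100
 * tag. In SVM the second filter would collapse into the first, which is
 * why it is skipped above.
 */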
/**
 * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
 * @vsi: VSI used to delete VLAN filters
 *
 * Delete the VLAN 0 filters in the same manner that they were added in
 * ice_vsi_add_vlan_zero.
 */
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct ice_vlan vlan;
	int err;

	vlan = ICE_VLAN(0, 0, 0);
	err = vlan_ops->del_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* in SVM both VLAN 0 filters are identical */
	if (!ice_is_dvm_ena(&vsi->back->hw))
		return 0;

	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
	err = vlan_ops->del_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* when deleting the last VLAN filter, make sure to disable the VLAN
	 * promisc mode so the filter isn't left by accident
	 */
	return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
				     ICE_MCAST_VLAN_PROMISC_BITS, 0);
}
/**
 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
 * @vsi: VSI used to get the VLAN mode. Returns 2 in DVM, else 1.
 */
static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
{
#define ICE_DVM_NUM_ZERO_VLAN_FLTRS	2
#define ICE_SVM_NUM_ZERO_VLAN_FLTRS	1
	/* no VLAN 0 filter is created when a port VLAN is active */
	if (vsi->type == ICE_VSI_VF) {
		if (WARN_ON(!vsi->vf))
			return 0;

		if (ice_vf_is_port_vlan_ena(vsi->vf))
			return 0;
	}

	if (ice_is_dvm_ena(&vsi->back->hw))
		return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
	else
		return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
}
/**
 * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
 * @vsi: VSI used to determine if any non-zero VLANs have been added
 */
bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
{
	return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
}
/**
 * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
 * @vsi: VSI used to get the number of non-zero VLANs added
 */
u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
{
	return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
}
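/* Editor's sketch (worked example, values assumed): with DVM enabled and
 * no port VLAN, a VSI tracking VIDs {0 (x2 TPIDs), 10, 20} has
 * num_vlan = 4 and ice_vsi_num_zero_vlans() = 2, so
 * ice_vsi_num_non_zero_vlans() = 2 and ice_vsi_has_non_zero_vlans() is
 * true.
 */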
/**
 * ice_is_feature_supported - check if a feature is supported on this PF
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to check
 *
 * returns true if feature is supported, false otherwise
 */
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return false;

	return test_bit(f, pf->features);
}
/**
 * ice_set_feature_support - mark a feature as supported on this PF
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to set
 */
static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return;

	set_bit(f, pf->features);
}
/**
 * ice_clear_feature_support - mark a feature as unsupported on this PF
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to clear
 */
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return;

	clear_bit(f, pf->features);
}
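/* Editor's sketch (not part of the driver): the feature bitmap gates
 * optional init paths. A hypothetical GNSS probe, for example, would bail
 * out early on parts without the capability.
 */
static void example_gnss_init(struct ice_pf *pf)
{
	if (!ice_is_feature_supported(pf, ICE_F_GNSS))
		return;

	/* ... create the GNSS serial device here ... */
}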
/**
 * ice_init_feature_support - set up the PF's supported-feature bitmap
 * @pf: pointer to the struct ice_pf instance
 *
 * Called during driver initialization to set up supported features.
 */
void ice_init_feature_support(struct ice_pf *pf)
{
	switch (pf->hw.device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
		ice_set_feature_support(pf, ICE_F_DSCP);
		if (ice_is_e810t(&pf->hw)) {
			ice_set_feature_support(pf, ICE_F_SMA_CTRL);
			if (ice_gnss_is_gps_present(&pf->hw))
				ice_set_feature_support(pf, ICE_F_GNSS);
		}
		break;
	default:
		break;
	}
}
/**
 * ice_vsi_update_security - update security block in VSI
 * @vsi: pointer to VSI structure
 * @fill: function pointer to fill ctx
 */
int
ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
{
	struct ice_vsi_ctx ctx = { 0 };

	ctx.info = vsi->info;
	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	fill(&ctx);

	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
		return -ENODEV;

	vsi->info = ctx.info;
	return 0;
}
/**
 * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
			       (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}
/**
 * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
			       ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}
/**
 * ice_vsi_ctx_set_allow_override - allow destination override on VSI
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
}
/**
 * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
}
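/* Editor's sketch (not part of the driver): the fill-callback pattern
 * keeps a single AQ update routine for all security tweaks, so enabling
 * spoof checking on a VSI is just a matter of passing the right filler.
 */
static int example_enable_spoofchk(struct ice_vsi *vsi)
{
	return ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
}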
/**
 * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
 * @vsi: pointer to VSI structure
 * @set: set or unset the bit
 */
int
ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
{
	struct ice_vsi_ctx ctx = {
		.info	= vsi->info,
	};

	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
	if (set)
		ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
	else
		ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;

	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
		return -ENODEV;

	vsi->info = ctx.info;
	return 0;
}