Lines matching refs: pf

Each entry below is one source line that references the pf pointer: the leading number is the line number in the file, followed by the matched code and the enclosing function; definition lines where pf is a formal parameter are tagged "argument".

15 ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors,  in ice_init_irq_tracker()  argument
18 pf->irq_tracker.num_entries = max_vectors; in ice_init_irq_tracker()
19 pf->irq_tracker.num_static = num_static; in ice_init_irq_tracker()
20 xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC); in ice_init_irq_tracker()
27 static void ice_deinit_irq_tracker(struct ice_pf *pf) in ice_deinit_irq_tracker() argument
29 xa_destroy(&pf->irq_tracker.entries); in ice_deinit_irq_tracker()
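The tracker behind these calls is an allocating xarray: init records how many MSI-X entries may ever be handed out (max_vectors) and how many were statically enabled (num_static), then sets up the xarray in allocating mode; deinit simply destroys it. A minimal sketch of the pair, reconstructed from the matched lines (the name of the second init parameter is inferred from line 19):

static void ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors,
				 unsigned int num_static)
{
	pf->irq_tracker.num_entries = max_vectors;
	pf->irq_tracker.num_static = num_static;
	/* XA_FLAGS_ALLOC lets xa_alloc() pick free indices for us */
	xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC);
}

static void ice_deinit_irq_tracker(struct ice_pf *pf)
{
	xa_destroy(&pf->irq_tracker.entries);
}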
37 static void ice_free_irq_res(struct ice_pf *pf, u16 index) in ice_free_irq_res() argument
41 entry = xa_erase(&pf->irq_tracker.entries, index); in ice_free_irq_res()
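Releasing a tracker slot is an erase-and-free: xa_erase() removes whatever is stored at the index and returns it. The helper plausibly completes as below; the kfree() is an assumption that pairs with an allocation in ice_get_irq_res(), and kfree(NULL) is a no-op, so a missing entry is harmless:

static void ice_free_irq_res(struct ice_pf *pf, u16 index)
{
	struct ice_irq_entry *entry;

	/* xa_erase() returns the stored entry, or NULL if the slot was empty */
	entry = xa_erase(&pf->irq_tracker.entries, index);
	kfree(entry);
}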
56 static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only) in ice_get_irq_res() argument
58 struct xa_limit limit = { .max = pf->irq_tracker.num_entries, in ice_get_irq_res()
60 unsigned int num_static = pf->irq_tracker.num_static; in ice_get_irq_res()
73 ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit, in ice_get_irq_res()
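Index allocation is bounded by an xa_limit: by default any slot from 0 up to num_entries is fair game, but when dyn_only is set the lower bound plausibly moves past the static range so that only a dynamically allocatable index can be returned. xa_alloc() then stores the entry at the lowest free index within those limits. A sketch under those assumptions:

static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
{
	struct xa_limit limit = { .max = pf->irq_tracker.num_entries,
				  .min = 0 };
	unsigned int num_static = pf->irq_tracker.num_static;
	struct ice_irq_entry *entry;
	unsigned int index;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	/* assumption: dyn_only skips the statically enabled index range */
	if (dyn_only)
		limit.min = num_static;

	ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
		       GFP_KERNEL);
	if (ret) {
		kfree(entry);
		return NULL;
	}

	entry->index = index;
	entry->dynamic = index >= num_static;
	return entry;
}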
96 static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain) in ice_reduce_msix_usage() argument
100 if (!ice_is_rdma_ena(pf)) { in ice_reduce_msix_usage()
101 pf->num_lan_msix = v_remain; in ice_reduce_msix_usage()
109 dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n"); in ice_reduce_msix_usage()
110 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_reduce_msix_usage()
112 pf->num_rdma_msix = 0; in ice_reduce_msix_usage()
113 pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; in ice_reduce_msix_usage()
118 pf->num_rdma_msix = ICE_MIN_RDMA_MSIX; in ice_reduce_msix_usage()
119 pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX; in ice_reduce_msix_usage()
123 pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + in ice_reduce_msix_usage()
125 pf->num_lan_msix = v_remain - pf->num_rdma_msix; in ice_reduce_msix_usage()
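The assignments above form a fallback ladder for splitting the remaining vectors between LAN and RDMA. Reconstructed sketch; the branch conditions between the visible assignments are assumptions, only the assignments themselves come from the listing:

static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
{
	if (!ice_is_rdma_ena(pf)) {
		/* no RDMA: LAN keeps everything that is left */
		pf->num_lan_msix = v_remain;
		return;
	}

	if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
		/* can't satisfy both minimums: sacrifice RDMA entirely */
		dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
		clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
		pf->num_rdma_msix = 0;
		pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
	/* assumption: threshold below which a full split is not worthwhile */
	} else if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_RDMA_NUM_AEQ_MSIX + 1) {
		/* enough for the floors only: pin RDMA to its minimum */
		pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
		pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
	} else {
		/* reserve the AEQ vectors for RDMA, split the rest evenly;
		 * e.g. with v_remain = 20 and ICE_RDMA_NUM_AEQ_MSIX = 4
		 * (illustrative value): rdma = (20 - 4) / 2 + 4 = 12, lan = 8
		 */
		pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
				    ICE_RDMA_NUM_AEQ_MSIX;
		pf->num_lan_msix = v_remain - pf->num_rdma_msix;
	}
}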
137 static int ice_ena_msix_range(struct ice_pf *pf) in ice_ena_msix_range() argument
140 struct device *dev = ice_pf_to_dev(pf); in ice_ena_msix_range()
143 hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors; in ice_ena_msix_range()
150 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) in ice_ena_msix_range()
159 pf->num_lan_msix = num_cpus; in ice_ena_msix_range()
160 v_wanted += pf->num_lan_msix; in ice_ena_msix_range()
163 if (ice_is_rdma_ena(pf)) { in ice_ena_msix_range()
164 pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; in ice_ena_msix_range()
165 v_wanted += pf->num_rdma_msix; in ice_ena_msix_range()
185 ice_reduce_msix_usage(pf, v_remain); in ice_ena_msix_range()
186 v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other; in ice_ena_msix_range()
189 pf->num_lan_msix); in ice_ena_msix_range()
190 if (ice_is_rdma_ena(pf)) in ice_ena_msix_range()
192 pf->num_rdma_msix); in ice_ena_msix_range()
196 v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted, in ice_ena_msix_range()
210 pci_free_irq_vectors(pf->pdev); in ice_ena_msix_range()
219 ice_reduce_msix_usage(pf, v_remain); in ice_ena_msix_range()
222 pf->num_lan_msix); in ice_ena_msix_range()
224 if (ice_is_rdma_ena(pf)) in ice_ena_msix_range()
226 pf->num_rdma_msix); in ice_ena_msix_range()
233 pf->num_rdma_msix = 0; in ice_ena_msix_range()
234 pf->num_lan_msix = 0; in ice_ena_msix_range()
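Taken together, ice_ena_msix_range() budgets a wanted vector count (misc vectors, one per CPU for LAN, per-CPU plus AEQ for RDMA), trims it to the hardware function's num_msix_vectors via ice_reduce_msix_usage(), asks the PCI core for between ICE_MIN_MSIX and v_wanted vectors, and redistributes once more if fewer come back, failing with both counts zeroed when even the LAN minimum can't be met. An outline under those assumptions, with placeholder budgets where the listing hides the real constants:

static int ice_ena_msix_range(struct ice_pf *pf)
{
	int hw_num_msix, v_other, v_wanted, v_actual, v_remain;
	struct device *dev = ice_pf_to_dev(pf);
	int num_cpus;

	hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
	num_cpus = num_online_cpus();	/* assumption: per-CPU sizing source */

	/* misc vectors; the real constants are not in the listing */
	v_other = 1;		/* placeholder */
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
		v_other += 1;	/* placeholder: flow director vector(s) */

	pf->num_lan_msix = num_cpus;
	v_wanted = v_other + pf->num_lan_msix;

	if (ice_is_rdma_ena(pf)) {
		pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
		v_wanted += pf->num_rdma_msix;
	}

	/* trim the request to what the hardware function exposes */
	if (v_wanted > hw_num_msix) {
		v_remain = hw_num_msix - v_other;
		ice_reduce_msix_usage(pf, v_remain);
		v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
		dev_notice(dev, "Reducing request to %d MSI-X vectors (%d LAN)\n",
			   v_wanted, pf->num_lan_msix);
	}

	v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted,
					 PCI_IRQ_MSIX);
	if (v_actual < 0)
		goto err;

	/* got fewer than wanted: redistribute against what we really have */
	if (v_actual < v_wanted) {
		v_remain = v_actual - v_other;
		if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
			pci_free_irq_vectors(pf->pdev);
			goto err;
		}
		ice_reduce_msix_usage(pf, v_remain);
	}

	return v_actual;

err:
	pf->num_rdma_msix = 0;
	pf->num_lan_msix = 0;
	return -ENOMEM;	/* assumption: exact errno not visible in the listing */
}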
242 void ice_clear_interrupt_scheme(struct ice_pf *pf) in ice_clear_interrupt_scheme() argument
244 pci_free_irq_vectors(pf->pdev); in ice_clear_interrupt_scheme()
245 ice_deinit_irq_tracker(pf); in ice_clear_interrupt_scheme()
252 int ice_init_interrupt_scheme(struct ice_pf *pf) in ice_init_interrupt_scheme() argument
254 int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; in ice_init_interrupt_scheme()
257 vectors = ice_ena_msix_range(pf); in ice_init_interrupt_scheme()
262 if (pci_msix_can_alloc_dyn(pf->pdev)) in ice_init_interrupt_scheme()
267 ice_init_irq_tracker(pf, max_vectors, vectors); in ice_init_interrupt_scheme()
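Initialization ties the pieces together, and the tracker sizing depends on dynamic MSI-X support: when pci_msix_can_alloc_dyn() says the PCI core can add vectors after enablement, the tracker is sized to the full hardware capability so indices past the statically enabled range can be claimed later; otherwise it is capped at what ice_ena_msix_range() actually obtained. Sketch; the error path and the else branch are inferred:

int ice_init_interrupt_scheme(struct ice_pf *pf)
{
	int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors, max_vectors;

	vectors = ice_ena_msix_range(pf);
	if (vectors < 0)
		return vectors;

	if (pci_msix_can_alloc_dyn(pf->pdev))
		max_vectors = total_vectors;	/* dynamic: allow the full range */
	else
		max_vectors = vectors;		/* static only: cap at what we got */

	ice_init_irq_tracker(pf, max_vectors, vectors);

	return 0;
}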
294 struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only) in ice_alloc_irq() argument
296 int sriov_base_vector = pf->sriov_base_vector; in ice_alloc_irq()
298 struct device *dev = ice_pf_to_dev(pf); in ice_alloc_irq()
301 entry = ice_get_irq_res(pf, dyn_only); in ice_alloc_irq()
309 if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) { in ice_alloc_irq()
310 map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL); in ice_alloc_irq()
316 map.virq = pci_irq_vector(pf->pdev, map.index); in ice_alloc_irq()
323 ice_free_irq_res(pf, entry->index); in ice_alloc_irq()
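Allocation has two paths: a static index maps onto a vector that pci_alloc_irq_vectors() already enabled, so pci_irq_vector() just resolves its Linux IRQ number; a dynamic index must first be backed by a new vector via pci_msix_alloc_irq_at(). The sriov_base_vector read on line 296 plausibly guards against handing out indices reserved for SR-IOV. Sketch with those inferences marked:

struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only)
{
	int sriov_base_vector = pf->sriov_base_vector;
	struct msi_map map = { .index = -ENOENT };	/* assumption: "no vector" default */
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_irq_entry *entry;

	entry = ice_get_irq_res(pf, dyn_only);
	if (!entry)
		return map;

	/* assumption: refuse indices that overlap the SR-IOV vector space */
	if (sriov_base_vector && entry->index >= sriov_base_vector)
		goto exit_free_res;

	if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) {
		/* dynamic entry: enable a brand-new vector at this index */
		map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL);
		if (map.index < 0)
			goto exit_free_res;
	} else {
		/* static entry: the vector already exists, just look it up */
		map.index = entry->index;
		map.virq = pci_irq_vector(pf->pdev, map.index);
	}

	return map;

exit_free_res:
	dev_err(dev, "Unable to allocate irq\n");
	ice_free_irq_res(pf, entry->index);
	return map;
}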
335 void ice_free_irq(struct ice_pf *pf, struct msi_map map) in ice_free_irq() argument
339 entry = xa_load(&pf->irq_tracker.entries, map.index); in ice_free_irq()
342 dev_err(ice_pf_to_dev(pf), "Failed to get MSIX interrupt entry at index %d", in ice_free_irq()
347 dev_dbg(ice_pf_to_dev(pf), "Free irq at index %d\n", map.index); in ice_free_irq()
350 pci_msix_free_irq(pf->pdev, map); in ice_free_irq()
352 ice_free_irq_res(pf, map.index); in ice_free_irq()
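Freeing mirrors allocation: look up the tracker entry, tear down the PCI-level vector only if it was dynamically added (the entry->dynamic test is an inference from ice_alloc_irq()'s two paths), then release the tracker slot:

void ice_free_irq(struct ice_pf *pf, struct msi_map map)
{
	struct ice_irq_entry *entry;

	entry = xa_load(&pf->irq_tracker.entries, map.index);
	if (!entry) {
		dev_err(ice_pf_to_dev(pf), "Failed to get MSIX interrupt entry at index %d",
			map.index);
		return;
	}

	dev_dbg(ice_pf_to_dev(pf), "Free irq at index %d\n", map.index);

	/* only dynamically added vectors have a PCI-level IRQ to remove */
	if (entry->dynamic)
		pci_msix_free_irq(pf->pdev, map);

	ice_free_irq_res(pf, map.index);
}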
363 int ice_get_max_used_msix_vector(struct ice_pf *pf) in ice_get_max_used_msix_vector() argument
369 start = pf->irq_tracker.num_static; in ice_get_max_used_msix_vector()
372 xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) { in ice_get_max_used_msix_vector()
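Since dynamic entries are always placed at or past the static range (see ice_get_irq_res()), scanning the xarray from num_static is enough to find the highest index in use. A sketch; the max-tracking loop body is inferred from the function's name:

int ice_get_max_used_msix_vector(struct ice_pf *pf)
{
	unsigned long start, index, max_idx;
	void *entry;

	/* dynamic entries live at or above num_static */
	start = pf->irq_tracker.num_static;
	max_idx = start - 1;

	xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) {
		if (index > max_idx)
			max_idx = index;
	}

	return max_idx;
}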