// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_irq.h"

/**
 * ice_init_irq_tracker - initialize interrupt tracker
 * @pf: board private structure
 * @max_vectors: maximum number of vectors that tracker can hold
 * @num_static: number of preallocated interrupts
 */
static void
ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors,
		     unsigned int num_static)
{
	pf->irq_tracker.num_entries = max_vectors;
	pf->irq_tracker.num_static = num_static;
	xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC);
}

/**
 * ice_deinit_irq_tracker - free xarray tracker
 * @pf: board private structure
 */
static void ice_deinit_irq_tracker(struct ice_pf *pf)
{
	xa_destroy(&pf->irq_tracker.entries);
}

/**
 * ice_free_irq_res - free an interrupt resource
 * @pf: board private structure
 * @index: index previously returned by ice_get_irq_res
 */
static void ice_free_irq_res(struct ice_pf *pf, u16 index)
{
	struct ice_irq_entry *entry;

	entry = xa_erase(&pf->irq_tracker.entries, index);
	kfree(entry);
}

/**
 * ice_get_irq_res - get an interrupt resource
 * @pf: board private structure
 * @dyn_only: force the entry to be dynamically allocated
 *
 * Allocate a new irq entry in a free slot of the tracker. Since an xarray
 * is used, the new entry is always allocated at the lowest free index. The
 * allocation limit is capped at the maximum number of tracker entries.
 *
 * Return: the allocated irq entry, or NULL on failure.
 */
static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
{
	struct xa_limit limit = { .max = pf->irq_tracker.num_entries,
				  .min = 0 };
	unsigned int num_static = pf->irq_tracker.num_static;
	struct ice_irq_entry *entry;
	unsigned int index;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	/* skip preallocated entries if the caller says so */
	if (dyn_only)
		limit.min = num_static;

	ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
		       GFP_KERNEL);

	if (ret) {
		kfree(entry);
		entry = NULL;
	} else {
		entry->index = index;
		entry->dynamic = index >= num_static;
	}

	return entry;
}

/**
 * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
 * @pf: board private structure
 * @v_remain: number of remaining MSI-X vectors to be distributed
 *
 * Reduce the usage of MSI-X vectors when the entire request cannot be
 * fulfilled. pf->num_lan_msix and pf->num_rdma_msix are set based on the
 * number of remaining vectors.
 */
static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
{
	int v_rdma;

	if (!ice_is_rdma_ena(pf)) {
		pf->num_lan_msix = v_remain;
		return;
	}

	/* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
	v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;

	if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
		dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
		clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);

		pf->num_rdma_msix = 0;
		pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
	} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
		   (v_remain - v_rdma < v_rdma)) {
		/* Support minimum RDMA and give remaining vectors to LAN MSIX
		 */
		pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
		pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
	} else {
		/* Split remaining MSIX with RDMA after accounting for AEQ MSIX
		 */
		pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
				    ICE_RDMA_NUM_AEQ_MSIX;
		pf->num_lan_msix = v_remain - pf->num_rdma_msix;
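		/* Worked example (the constant value here is an assumption for
		 * illustration): with v_remain = 20 and
		 * ICE_RDMA_NUM_AEQ_MSIX = 4, RDMA gets (20 - 4) / 2 + 4 = 12
		 * vectors and LAN gets the remaining 20 - 12 = 8.
		 */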
	}
}

/**
 * ice_ena_msix_range - Request a range of MSI-X vectors from the OS
 * @pf: board private structure
 *
 * Compute the number of MSI-X vectors wanted and request them from the OS.
 * Adjust device usage if there are not enough vectors. Return the number of
 * vectors reserved, or a negative value on failure.
 */
static int ice_ena_msix_range(struct ice_pf *pf)
{
	int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
	num_cpus = num_online_cpus();

	/* LAN miscellaneous handler */
	v_other = ICE_MIN_LAN_OICR_MSIX;

	/* Flow Director */
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
		v_other += ICE_FDIR_MSIX;

	/* switchdev */
	v_other += ICE_ESWITCH_MSIX;

	v_wanted = v_other;

	/* LAN traffic */
	pf->num_lan_msix = num_cpus;
	v_wanted += pf->num_lan_msix;

	/* RDMA auxiliary driver */
	if (ice_is_rdma_ena(pf)) {
		pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
		v_wanted += pf->num_rdma_msix;
	}
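
	/* Worked example (the constant values here are assumptions for
	 * illustration): with 8 online CPUs, flow director and RDMA enabled,
	 * ICE_MIN_LAN_OICR_MSIX = 1, ICE_FDIR_MSIX = 2, ICE_ESWITCH_MSIX = 1
	 * and ICE_RDMA_NUM_AEQ_MSIX = 4, this adds up to
	 * v_wanted = 1 + 2 + 1 + 8 + (8 + 4) = 24.
	 */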

	if (v_wanted > hw_num_msix) {
		int v_remain;

		dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
			 v_wanted, hw_num_msix);

		if (hw_num_msix < ICE_MIN_MSIX) {
			err = -ERANGE;
			goto exit_err;
		}

		v_remain = hw_num_msix - v_other;
		if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
			v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
			v_remain = ICE_MIN_LAN_TXRX_MSIX;
		}

		ice_reduce_msix_usage(pf, v_remain);
		v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;

		dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
			   pf->num_lan_msix);
		if (ice_is_rdma_ena(pf))
			dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
				   pf->num_rdma_msix);
	}

	/* actually reserve the vectors */
	v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted,
					 PCI_IRQ_MSIX);
	if (v_actual < 0) {
		dev_err(dev, "unable to reserve MSI-X vectors\n");
		err = v_actual;
		goto exit_err;
	}

	if (v_actual < v_wanted) {
		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
			 v_wanted, v_actual);

		if (v_actual < ICE_MIN_MSIX) {
			/* error if we can't get minimum vectors */
			pci_free_irq_vectors(pf->pdev);
			err = -ERANGE;
			goto exit_err;
		} else {
			int v_remain = v_actual - v_other;

			if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
				v_remain = ICE_MIN_LAN_TXRX_MSIX;

			ice_reduce_msix_usage(pf, v_remain);

			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
				   pf->num_lan_msix);

			if (ice_is_rdma_ena(pf))
				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
					   pf->num_rdma_msix);
		}
	}

	return v_actual;

exit_err:
	pf->num_rdma_msix = 0;
	pf->num_lan_msix = 0;
	return err;
}

/**
 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
 * @pf: board private structure
 */
void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
	pci_free_irq_vectors(pf->pdev);
	ice_deinit_irq_tracker(pf);
}

/**
 * ice_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 */
int ice_init_interrupt_scheme(struct ice_pf *pf)
{
	int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors, max_vectors;

	vectors = ice_ena_msix_range(pf);

	if (vectors < 0)
		return -ENOMEM;

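	/* With dynamic MSI-X allocation the tracker can span the full device
	 * MSI-X range, so entries beyond the initially enabled vectors can be
	 * handed out later; otherwise it is limited to what was just enabled.
	 */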
	if (pci_msix_can_alloc_dyn(pf->pdev))
		max_vectors = total_vectors;
	else
		max_vectors = vectors;

	ice_init_irq_tracker(pf, max_vectors, vectors);

	return 0;
}

/**
 * ice_alloc_irq - Allocate new interrupt vector
 * @pf: board private structure
 * @dyn_only: force dynamic allocation of the interrupt
 *
 * Allocate a new interrupt vector for a given owner id, return a struct
 * msi_map with the interrupt details and track the allocated interrupt
 * appropriately.
 *
 * This function reserves a new irq entry from the irq_tracker. If, according
 * to the tracker information, all interrupts that were allocated with
 * pci_alloc_irq_vectors() are already in use and dynamically allocated
 * interrupts are supported, then the new interrupt is allocated with
 * pci_msix_alloc_irq_at().
 *
 * Some callers may only support dynamically allocated interrupts.
 * This is indicated with the dyn_only flag.
 *
 * On failure, return a map with a negative .index. The caller is expected
 * to check the returned map index.
 */
struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only)
{
	int sriov_base_vector = pf->sriov_base_vector;
	struct msi_map map = { .index = -ENOENT };
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_irq_entry *entry;

	entry = ice_get_irq_res(pf, dyn_only);
	if (!entry)
		return map;

	/* fail if we're about to violate the SRIOV vector space */
	if (sriov_base_vector && entry->index >= sriov_base_vector)
		goto exit_free_res;

	if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) {
		map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL);
		if (map.index < 0)
			goto exit_free_res;
		dev_dbg(dev, "allocated new irq at index %d\n", map.index);
	} else {
		map.index = entry->index;
		map.virq = pci_irq_vector(pf->pdev, map.index);
	}

	return map;

exit_free_res:
	dev_err(dev, "Could not allocate irq at idx %d\n", entry->index);
	ice_free_irq_res(pf, entry->index);
	return map;
}

/**
 * ice_free_irq - Free interrupt vector
 * @pf: board private structure
 * @map: map with interrupt details
 *
 * Remove the allocated interrupt from the interrupt tracker. If the interrupt
 * was allocated dynamically, free the respective interrupt vector.
 */
void ice_free_irq(struct ice_pf *pf, struct msi_map map)
{
	struct ice_irq_entry *entry;

	entry = xa_load(&pf->irq_tracker.entries, map.index);

	if (!entry) {
		dev_err(ice_pf_to_dev(pf), "Failed to get MSIX interrupt entry at index %d",
			map.index);
		return;
	}

	dev_dbg(ice_pf_to_dev(pf), "Free irq at index %d\n", map.index);

	if (entry->dynamic)
		pci_msix_free_irq(pf->pdev, map);

	ice_free_irq_res(pf, map.index);
}

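/* Illustrative usage sketch only; the handler, name and cookie below are
 * hypothetical and not taken from an actual caller:
 *
 *	struct msi_map map = ice_alloc_irq(pf, false);
 *
 *	if (map.index < 0)
 *		return map.index;
 *	err = request_irq(map.virq, example_handler, 0, "ice-example", data);
 *	...
 *	free_irq(map.virq, data);
 *	ice_free_irq(pf, map);
 */
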
/**
 * ice_get_max_used_msix_vector - Get the max used interrupt vector
 * @pf: board private structure
 *
 * Return the index of the highest used interrupt vector, counted from the
 * beginning of the MSI-X table. Take into account that some interrupts may
 * have been dynamically allocated after MSI-X was initially enabled.
 */
int ice_get_max_used_msix_vector(struct ice_pf *pf)
{
	unsigned long start, index, max_idx;
	void *entry;

	/* Treat all preallocated interrupts as used */
	start = pf->irq_tracker.num_static;
	max_idx = start - 1;

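	/* Scan dynamically allocated entries above the static range for the
	 * highest index currently in use.
	 */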
	xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) {
		if (index > max_idx)
			max_idx = index;
	}

	return max_idx;
}