// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_irq.h"

/**
 * ice_init_irq_tracker - initialize interrupt tracker
 * @pf: board private structure
 * @max_vectors: maximum number of vectors that tracker can hold
 * @num_static: number of preallocated interrupts
 */
static void
ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors,
		     unsigned int num_static)
{
	pf->irq_tracker.num_entries = max_vectors;
	pf->irq_tracker.num_static = num_static;
	xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC);
}
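
/*
 * Illustrative sketch (not part of the driver): with XA_FLAGS_ALLOC the
 * tracker xarray hands out indices itself via xa_alloc(), so a tracker
 * initialized as
 *
 *	ice_init_irq_tracker(pf, 64, 16);
 *
 * can hold entries at indices 0..63, of which 0..15 map onto the vectors
 * preallocated with pci_alloc_irq_vectors(). The counts are made up for
 * the example.
 */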

/**
 * ice_deinit_irq_tracker - free xarray tracker
 * @pf: board private structure
 */
static void ice_deinit_irq_tracker(struct ice_pf *pf)
{
	xa_destroy(&pf->irq_tracker.entries);
}

/**
 * ice_free_irq_res - free a block of resources
 * @pf: board private structure
 * @index: starting index previously returned by ice_get_irq_res
 */
static void ice_free_irq_res(struct ice_pf *pf, u16 index)
{
	struct ice_irq_entry *entry;

	entry = xa_erase(&pf->irq_tracker.entries, index);
	kfree(entry);
}

/**
 * ice_get_irq_res - get an interrupt resource
 * @pf: board private structure
 * @dyn_allowed: allow entry to be dynamically allocated
 *
 * Allocate a new irq entry in the lowest free slot of the tracker. Since an
 * xarray is used, the new entry is always allocated at the lowest possible
 * index. Set the allocation limit according to the maximum number of tracker
 * entries.
 *
 * Returns the allocated irq entry or NULL on failure.
 */
static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf,
					     bool dyn_allowed)
{
	struct xa_limit limit = { .max = pf->irq_tracker.num_entries - 1,
				  .min = 0 };
	unsigned int num_static = pf->irq_tracker.num_static - 1;
	struct ice_irq_entry *entry;
	unsigned int index;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	/* restrict the limit to preallocated entries if the caller does not
	 * allow dynamic allocation
	 */
	if (!dyn_allowed)
		limit.max = num_static;

	ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
		       GFP_KERNEL);

	if (ret) {
		kfree(entry);
		entry = NULL;
	} else {
		entry->index = index;
		entry->dynamic = index > num_static;
	}

	return entry;
}
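
/*
 * Illustrative sketch (not part of the driver): with
 * pf->irq_tracker.num_static == 16, i.e. indices 0..15 backed by
 * preallocated vectors (numbers made up for the example), the limits
 * chosen above behave as follows:
 *
 *	dyn_allowed == false:	xa_alloc() range is [0, 15]; any entry it
 *				returns has ->dynamic == false.
 *	dyn_allowed == true:	xa_alloc() range is [0, num_entries - 1];
 *				an entry lands in a slot >= 16 only once
 *				all static slots are taken, and then has
 *				->dynamic == true.
 */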

/**
 * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
 * @pf: board private structure
 * @v_remain: number of remaining MSI-X vectors to be distributed
 *
 * Reduce the usage of MSI-X vectors when the entire request cannot be
 * fulfilled. pf->num_lan_msix and pf->num_rdma_msix values are set based on
 * the number of remaining vectors.
 */
static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
{
	int v_rdma;

	if (!ice_is_rdma_ena(pf)) {
		pf->num_lan_msix = v_remain;
		return;
	}

	/* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
	v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;

	if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
		dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
		clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);

		pf->num_rdma_msix = 0;
		pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
	} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
		   (v_remain - v_rdma < v_rdma)) {
		/* Support minimum RDMA and give remaining vectors to LAN MSIX */
		pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
		pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
	} else {
		/* Split remaining MSIX with RDMA after accounting for AEQ MSIX */
		pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
				    ICE_RDMA_NUM_AEQ_MSIX;
		pf->num_lan_msix = v_remain - pf->num_rdma_msix;
	}
}
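
/*
 * Illustrative sketch (not part of the driver): assuming
 * ICE_RDMA_NUM_AEQ_MSIX == 4 and v_remain == 20 (numbers made up for the
 * example; check ice.h for the real define), the final branch above
 * yields
 *
 *	num_rdma_msix = (20 - 4) / 2 + 4 = 12
 *	num_lan_msix  = 20 - 12         = 8
 *
 * i.e. the non-AEQ vectors are split evenly between RDMA and LAN, and
 * RDMA additionally keeps its AEQ vectors.
 */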

/**
 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
 * @pf: board private structure
 *
 * Compute the number of MSIX vectors wanted and request from the OS. Adjust
 * device usage if there are not enough vectors. Return the number of vectors
 * reserved or negative on failure.
 */
static int ice_ena_msix_range(struct ice_pf *pf)
{
	int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
	num_cpus = num_online_cpus();

	/* LAN miscellaneous handler */
	v_other = ICE_MIN_LAN_OICR_MSIX;

	/* Flow Director */
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
		v_other += ICE_FDIR_MSIX;

	/* switchdev */
	v_other += ICE_ESWITCH_MSIX;

	v_wanted = v_other;

	/* LAN traffic */
	pf->num_lan_msix = num_cpus;
	v_wanted += pf->num_lan_msix;

	/* RDMA auxiliary driver */
	if (ice_is_rdma_ena(pf)) {
		pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
		v_wanted += pf->num_rdma_msix;
	}

	if (v_wanted > hw_num_msix) {
		int v_remain;

		dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
			 v_wanted, hw_num_msix);

		if (hw_num_msix < ICE_MIN_MSIX) {
			err = -ERANGE;
			goto exit_err;
		}

		v_remain = hw_num_msix - v_other;
		if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
			v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
			v_remain = ICE_MIN_LAN_TXRX_MSIX;
		}

		ice_reduce_msix_usage(pf, v_remain);
		v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;

		dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
			   pf->num_lan_msix);
		if (ice_is_rdma_ena(pf))
			dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
				   pf->num_rdma_msix);
	}

	/* actually reserve the vectors */
	v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted,
					 PCI_IRQ_MSIX);
	if (v_actual < 0) {
		dev_err(dev, "unable to reserve MSI-X vectors\n");
		err = v_actual;
		goto exit_err;
	}

	if (v_actual < v_wanted) {
		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
			 v_wanted, v_actual);

		if (v_actual < ICE_MIN_MSIX) {
			/* error if we can't get minimum vectors */
			pci_free_irq_vectors(pf->pdev);
			err = -ERANGE;
			goto exit_err;
		} else {
			int v_remain = v_actual - v_other;

			if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
				v_remain = ICE_MIN_LAN_TXRX_MSIX;

			ice_reduce_msix_usage(pf, v_remain);

			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
				   pf->num_lan_msix);

			if (ice_is_rdma_ena(pf))
				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
					   pf->num_rdma_msix);
		}
	}

	return v_actual;

exit_err:
	pf->num_rdma_msix = 0;
	pf->num_lan_msix = 0;
	return err;
}
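
/*
 * Illustrative sketch (not part of the driver): on a 16-CPU system with
 * Flow Director and RDMA enabled, and assuming ICE_MIN_LAN_OICR_MSIX == 1,
 * ICE_FDIR_MSIX == 2, ICE_ESWITCH_MSIX == 1 and ICE_RDMA_NUM_AEQ_MSIX == 4
 * (check ice.h for the real defines), the request above works out to
 *
 *	v_other  = 1 + 2 + 1          = 4
 *	v_wanted = 4 + 16 + (16 + 4)  = 40
 *
 * If the device or the OS grants fewer than 40 vectors,
 * ice_reduce_msix_usage() redistributes whatever remains after v_other
 * between LAN and RDMA.
 */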

/**
 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
 * @pf: board private structure
 */
void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
	pci_free_irq_vectors(pf->pdev);
	ice_deinit_irq_tracker(pf);
}

/**
 * ice_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 */
int ice_init_interrupt_scheme(struct ice_pf *pf)
{
	int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors, max_vectors;

	vectors = ice_ena_msix_range(pf);

	if (vectors < 0)
		return -ENOMEM;

	if (pci_msix_can_alloc_dyn(pf->pdev))
		max_vectors = total_vectors;
	else
		max_vectors = vectors;

	ice_init_irq_tracker(pf, max_vectors, vectors);

	return 0;
}
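
/*
 * Illustrative sketch (not part of the driver): if the device exposes
 * 1024 MSI-X vectors but ice_ena_msix_range() reserved 40 (counts made
 * up for the example), a platform with pci_msix_can_alloc_dyn() support
 * gets a tracker sized for all 1024 entries (40 static, the rest
 * dynamically allocatable), while a platform without that support gets
 * a tracker capped at the 40 reserved vectors.
 */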

/**
 * ice_alloc_irq - Allocate new interrupt vector
 * @pf: board private structure
 * @dyn_allowed: allow dynamic allocation of the interrupt
 *
 * Allocate a new interrupt vector for a given owner id. Return a struct
 * msi_map with the interrupt details and track the allocated interrupt
 * appropriately.
 *
 * This function reserves a new irq entry from the irq_tracker. If, according
 * to the tracker information, all interrupts that were allocated with
 * pci_alloc_irq_vectors() are already used and dynamically allocated
 * interrupts are supported, then a new interrupt is allocated with
 * pci_msix_alloc_irq_at().
 *
 * Some callers may only support dynamically allocated interrupts.
 * This is indicated with the dyn_allowed flag.
 *
 * On failure, return a map with a negative .index. The caller is expected to
 * check the returned map index.
 */
struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_allowed)
{
	int sriov_base_vector = pf->sriov_base_vector;
	struct msi_map map = { .index = -ENOENT };
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_irq_entry *entry;

	entry = ice_get_irq_res(pf, dyn_allowed);
	if (!entry)
		return map;

	/* fail if we're about to violate SRIOV vectors space */
	if (sriov_base_vector && entry->index >= sriov_base_vector)
		goto exit_free_res;

	if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) {
		map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL);
		if (map.index < 0)
			goto exit_free_res;
		dev_dbg(dev, "allocated new irq at index %d\n", map.index);
	} else {
		map.index = entry->index;
		map.virq = pci_irq_vector(pf->pdev, map.index);
	}

	return map;

exit_free_res:
	dev_err(dev, "Could not allocate irq at idx %d\n", entry->index);
	ice_free_irq_res(pf, entry->index);
	return map;
}
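
/*
 * Illustrative sketch (not part of the driver): a minimal caller pattern
 * pairing ice_alloc_irq() with ice_free_irq(), assuming a hypothetical
 * handler my_handler() and cookie my_data.
 *
 *	struct msi_map map = ice_alloc_irq(pf, true);
 *	int err;
 *
 *	if (map.index < 0)
 *		return map.index;
 *
 *	err = request_irq(map.virq, my_handler, 0, "my-ice-irq", my_data);
 *	if (err)
 *		ice_free_irq(pf, map);
 */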

/**
 * ice_free_irq - Free interrupt vector
 * @pf: board private structure
 * @map: map with interrupt details
 *
 * Remove the allocated interrupt from the interrupt tracker. If the interrupt
 * was allocated dynamically, free the respective interrupt vector.
 */
void ice_free_irq(struct ice_pf *pf, struct msi_map map)
{
	struct ice_irq_entry *entry;

	entry = xa_load(&pf->irq_tracker.entries, map.index);

	if (!entry) {
		dev_err(ice_pf_to_dev(pf), "Failed to get MSIX interrupt entry at index %d\n",
			map.index);
		return;
	}

	dev_dbg(ice_pf_to_dev(pf), "Free irq at index %d\n", map.index);

	if (entry->dynamic)
		pci_msix_free_irq(pf->pdev, map);

	ice_free_irq_res(pf, map.index);
}

/**
 * ice_get_max_used_msix_vector - Get the max used interrupt vector
 * @pf: board private structure
 *
 * Return the index of the maximum used interrupt vector with respect to the
 * beginning of the MSIX table. Take into account that some interrupts may
 * have been dynamically allocated after MSIX was initially enabled.
 */
int ice_get_max_used_msix_vector(struct ice_pf *pf)
{
	unsigned long start, index, max_idx;
	void *entry;

	/* Treat all preallocated interrupts as used */
	start = pf->irq_tracker.num_static;
	max_idx = start - 1;

	xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) {
		if (index > max_idx)
			max_idx = index;
	}

	return max_idx;
}

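/*
 * Illustrative sketch (not part of the driver): with num_static == 16,
 * i.e. indices 0..15 preallocated (numbers made up for the example),
 * the walk above returns 15 when no dynamic entries exist, and returns
 * 42 if a dynamically allocated entry sits at index 42, even when lower
 * dynamic slots have already been freed again.
 */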