// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "mlx5_irq.h"
#include "lib/sf.h"
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif

#define MLX5_MAX_IRQ_NAME (32)
/* max irq_index is 255; three chars */
#define MLX5_MAX_IRQ_IDX_CHARS (3)

#define MLX5_SFS_PER_CTRL_IRQ 64
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2

#define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
#define MLX5_EQ_SHARE_IRQ_MIN_COMP (1)
#define MLX5_EQ_SHARE_IRQ_MIN_CTRL (4)
#define MLX5_EQ_REFS_PER_IRQ (2)
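
/* Note on the sharing thresholds: each EQ that uses an IRQ holds
 * MLX5_EQ_REFS_PER_IRQ (2) references on it, one taken when the IRQ is
 * requested and one when the EQ's notifier is attached, so the thresholds
 * are stored pre-scaled by this factor in irq_pool_alloc(). For example, a
 * completion pool with MLX5_EQ_SHARE_IRQ_MAX_COMP = 8 is reported as
 * overloaded once more than 8 EQs (16 krefs) share a single IRQ.
 */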

struct mlx5_irq {
	u32 index;
	struct atomic_notifier_head nh;
	cpumask_var_t mask;
	char name[MLX5_MAX_IRQ_NAME];
	struct kref kref;
	int irqn;
	struct mlx5_irq_pool *pool;
};

struct mlx5_irq_pool {
	char name[MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS];
	struct xa_limit xa_num_irqs;
	struct mutex lock; /* sync IRQs creations */
	struct xarray irqs;
	u32 max_threshold;
	u32 min_threshold;
	struct mlx5_core_dev *dev;
};

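/* One IRQ table exists per PCI function (PF or VF); SFs reuse the parent
 * device's table (see mlx5_irq_table_get()). pf_pool backs the function's
 * own async and completion EQs. When SF vectors are available, sf_ctrl_pool
 * backs SF control EQs and sf_comp_pool backs SF completion EQs; otherwise
 * SFs fall back to pf_pool (see irq_pools_init() and mlx5_irq_request()).
 */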
struct mlx5_irq_table {
	struct mlx5_irq_pool *pf_pool;
	struct mlx5_irq_pool *sf_ctrl_pool;
	struct mlx5_irq_pool *sf_comp_pool;
};

/**
 * mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
 *                                   to be assigned to each VF.
 * @dev: PF to work on
 * @num_vfs: Number of enabled VFs
 */
int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
{
	int num_vf_msix, min_msix, max_msix;

	num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
	if (!num_vf_msix)
		return 0;

	min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
	max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);

	/* Limit maximum number of MSI-X vectors so the default configuration
	 * has some available in the pool. This will allow the user to increase
	 * the number of vectors in a VF without having to first size-down other
	 * VFs.
	 */
	return max(min(num_vf_msix / num_vfs, max_msix / 2), min_msix);
}
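
/* Worked example of the default sizing above, with hypothetical capability
 * values: num_total_dynamic_vf_msix = 4096, max_dynamic_vf_msix_table_size =
 * 128, min_dynamic_vf_msix_table_size = 2 and 16 enabled VFs:
 *
 *	min(4096 / 16, 128 / 2) = min(256, 64) = 64
 *	max(64, 2) = 64 MSI-X vectors per VF by default,
 *
 * which leaves 4096 - 16 * 64 = 3072 vectors in the dynamic pool for later
 * per-VF increases via mlx5_set_msix_vec_count().
 */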

/**
 * mlx5_set_msix_vec_count - Set dynamically allocated MSI-X on the VF
 * @dev: PF to work on
 * @function_id: Internal PCI VF function ID
 * @msix_vec_count: Number of MSI-X vectors to set
 */
int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
			    int msix_vec_count)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *hca_cap = NULL, *query_cap = NULL, *cap;
	int num_vf_msix, min_msix, max_msix;
	int ret;

	num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
	if (!num_vf_msix)
		return 0;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) || !mlx5_core_is_pf(dev))
		return -EOPNOTSUPP;

	min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
	max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);

	if (msix_vec_count < min_msix)
		return -EINVAL;

	if (msix_vec_count > max_msix)
		return -EOVERFLOW;

	query_cap = kzalloc(query_sz, GFP_KERNEL);
	hca_cap = kzalloc(set_sz, GFP_KERNEL);
	if (!hca_cap || !query_cap) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mlx5_vport_get_other_func_cap(dev, function_id, query_cap);
	if (ret)
		goto out;

	cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
	memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
	       MLX5_UN_SZ_BYTES(hca_cap_union));
	MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);

	MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
	MLX5_SET(set_hca_cap_in, hca_cap, function_id, function_id);

	MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
	ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
out:
	kfree(hca_cap);
	kfree(query_cap);
	return ret;
}
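
/* Illustrative sketch of a caller (not code from this driver): an SR-IOV
 * enable flow could apply the default sizing to every VF before the VFs are
 * probed. Names below are hypothetical, and VF n is assumed to map to
 * function_id n + 1 (function_id 0 being the PF itself):
 *
 *	int vec = mlx5_get_default_msix_vec_count(dev, num_vfs);
 *	int vf;
 *
 *	for (vf = 0; vf < num_vfs; vf++)
 *		if (mlx5_set_msix_vec_count(dev, vf + 1, vec))
 *			mlx5_core_warn(dev, "failed to set MSI-X on VF %d\n", vf);
 */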

static void irq_release(struct kref *kref)
{
	struct mlx5_irq *irq = container_of(kref, struct mlx5_irq, kref);
	struct mlx5_irq_pool *pool = irq->pool;

	xa_erase(&pool->irqs, irq->index);
	/* free_irq requires that affinity and rmap are cleared before calling
	 * it. This is why there is an asymmetry with set_rmap, which should be
	 * called after alloc_irq but before request_irq.
	 */
	irq_set_affinity_hint(irq->irqn, NULL);
	free_cpumask_var(irq->mask);
	free_irq(irq->irqn, &irq->nh);
	kfree(irq);
}

static void irq_put(struct mlx5_irq *irq)
{
	struct mlx5_irq_pool *pool = irq->pool;

	mutex_lock(&pool->lock);
	kref_put(&irq->kref, irq_release);
	mutex_unlock(&pool->lock);
}

static irqreturn_t irq_int_handler(int irq, void *nh)
{
	atomic_notifier_call_chain(nh, 0, NULL);
	return IRQ_HANDLED;
}

static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
{
	snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
}

static void irq_set_name(char *name, int vecidx)
{
	if (vecidx == 0) {
		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
		return;
	}

	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
		 vecidx - MLX5_IRQ_VEC_COMP_BASE);
}
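
/* Examples of the resulting names once irq_request() appends the PCI name
 * (the PCI address is illustrative and MLX5_IRQ_VEC_COMP_BASE is assumed
 * to be 1):
 *
 *	PF pool, vecidx 0:		"mlx5_async0@pci:0000:06:00.0"
 *	PF pool, vecidx 1:		"mlx5_comp0@pci:0000:06:00.0"
 *	"mlx5_sf_comp" pool, vecidx 10:	"mlx5_sf_comp10@pci:0000:06:00.0"
 */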

static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
{
	struct mlx5_core_dev *dev = pool->dev;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_irq *irq;
	int err;

	irq = kzalloc(sizeof(*irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);
	irq->irqn = pci_irq_vector(dev->pdev, i);
	if (!pool->name[0])
		irq_set_name(name, i);
	else
		irq_sf_set_name(pool, name, i);
	ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
	snprintf(irq->name, MLX5_MAX_IRQ_NAME,
		 "%s@pci:%s", name, pci_name(dev->pdev));
	err = request_irq(irq->irqn, irq_int_handler, 0, irq->name,
			  &irq->nh);
	if (err) {
		mlx5_core_err(dev, "Failed to request irq. err = %d\n", err);
		goto err_req_irq;
	}
	if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
		mlx5_core_warn(dev, "zalloc_cpumask_var failed\n");
		err = -ENOMEM;
		goto err_cpumask;
	}
	kref_init(&irq->kref);
	irq->index = i;
	err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
	if (err) {
		mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
			      irq->index, err);
		goto err_xa;
	}
	irq->pool = pool;
	return irq;
err_xa:
	free_cpumask_var(irq->mask);
err_cpumask:
	free_irq(irq->irqn, &irq->nh);
err_req_irq:
	kfree(irq);
	return ERR_PTR(err);
}

int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
	int err;

	err = kref_get_unless_zero(&irq->kref);
	if (WARN_ON_ONCE(!err))
		/* Something very bad has happened here: we are enabling an EQ
		 * on a non-existing IRQ.
		 */
		return -ENOENT;
	err = atomic_notifier_chain_register(&irq->nh, nb);
	if (err)
		irq_put(irq);
	return err;
}

int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
	irq_put(irq);
	return atomic_notifier_chain_unregister(&irq->nh, nb);
}
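
/* Illustrative sketch of how an EQ-style consumer hooks into an IRQ; names
 * such as my_eq and my_eq_int_handler are hypothetical, not driver API:
 *
 *	static int my_eq_int_handler(struct notifier_block *nb,
 *				     unsigned long action, void *data)
 *	{
 *		// Runs from irq_int_handler() via the atomic notifier chain.
 *		return NOTIFY_OK;
 *	}
 *
 *	my_eq->nb.notifier_call = my_eq_int_handler;
 *	err = mlx5_irq_attach_nb(irq, &my_eq->nb);
 *	...
 *	mlx5_irq_detach_nb(irq, &my_eq->nb);
 */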

struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
{
	return irq->mask;
}

int mlx5_irq_get_index(struct mlx5_irq *irq)
{
	return irq->index;
}

/* irq_pool API */

/* creating an irq from irq_pool */
static struct mlx5_irq *irq_pool_create_irq(struct mlx5_irq_pool *pool,
					    struct cpumask *affinity)
{
	struct mlx5_irq *irq;
	u32 irq_index;
	int err;

	err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs,
		       GFP_KERNEL);
	if (err)
		return ERR_PTR(err);
	irq = irq_request(pool, irq_index);
	if (IS_ERR(irq))
		return irq;
	cpumask_copy(irq->mask, affinity);
	irq_set_affinity_hint(irq->irqn, irq->mask);
	return irq;
}

/* looking for the irq with the smallest refcount and the same affinity */
static struct mlx5_irq *irq_pool_find_least_loaded(struct mlx5_irq_pool *pool,
						   struct cpumask *affinity)
{
	int start = pool->xa_num_irqs.min;
	int end = pool->xa_num_irqs.max;
	struct mlx5_irq *irq = NULL;
	struct mlx5_irq *iter;
	unsigned long index;

	lockdep_assert_held(&pool->lock);
	xa_for_each_range(&pool->irqs, index, iter, start, end) {
		if (!cpumask_equal(iter->mask, affinity))
			continue;
		if (kref_read(&iter->kref) < pool->min_threshold)
			return iter;
		if (!irq || kref_read(&iter->kref) <
		    kref_read(&irq->kref))
			irq = iter;
	}
	return irq;
}

/* requesting an irq from a given pool according to given affinity */
static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
						  struct cpumask *affinity)
{
	struct mlx5_irq *least_loaded_irq, *new_irq;

	mutex_lock(&pool->lock);
	least_loaded_irq = irq_pool_find_least_loaded(pool, affinity);
	if (least_loaded_irq &&
	    kref_read(&least_loaded_irq->kref) < pool->min_threshold)
		goto out;
	new_irq = irq_pool_create_irq(pool, affinity);
	if (IS_ERR(new_irq)) {
		if (!least_loaded_irq) {
			mlx5_core_err(pool->dev, "Didn't find IRQ for cpu = %u\n",
				      cpumask_first(affinity));
			mutex_unlock(&pool->lock);
			return new_irq;
		}
		/* We failed to create a new IRQ for the requested affinity,
		 * so share an existing IRQ.
		 */
		goto out;
	}
	least_loaded_irq = new_irq;
	goto unlock;
out:
	kref_get(&least_loaded_irq->kref);
	if (kref_read(&least_loaded_irq->kref) > pool->max_threshold)
		mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
			      least_loaded_irq->irqn, pool->name,
			      kref_read(&least_loaded_irq->kref) / MLX5_EQ_REFS_PER_IRQ);
unlock:
	mutex_unlock(&pool->lock);
	return least_loaded_irq;
}

/* requesting an irq from a given pool according to given index */
static struct mlx5_irq *
irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
			struct cpumask *affinity)
{
	struct mlx5_irq *irq;

	mutex_lock(&pool->lock);
	irq = xa_load(&pool->irqs, vecidx);
	if (irq) {
		kref_get(&irq->kref);
		goto unlock;
	}
	irq = irq_request(pool, vecidx);
	if (IS_ERR(irq) || !affinity)
		goto unlock;
	cpumask_copy(irq->mask, affinity);
	irq_set_affinity_hint(irq->irqn, irq->mask);
unlock:
	mutex_unlock(&pool->lock);
	return irq;
}

static struct mlx5_irq_pool *find_sf_irq_pool(struct mlx5_irq_table *irq_table,
					      int i, struct cpumask *affinity)
{
	if (cpumask_empty(affinity) && i == MLX5_IRQ_EQ_CTRL)
		return irq_table->sf_ctrl_pool;
	return irq_table->sf_comp_pool;
}

/**
 * mlx5_irq_release - release an IRQ back to the system.
 * @irq: irq to be released.
 */
void mlx5_irq_release(struct mlx5_irq *irq)
{
	synchronize_irq(irq->irqn);
	irq_put(irq);
}

/**
 * mlx5_irq_request - request an IRQ for mlx5 device.
 * @dev: mlx5 device that is requesting the IRQ.
 * @vecidx: vector index of the IRQ. This argument is ignored if affinity is
 * provided.
 * @affinity: cpumask requested for this IRQ.
 *
 * This function returns a pointer to the IRQ, or an ERR_PTR in case of error.
 */
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
				  struct cpumask *affinity)
{
	struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
	struct mlx5_irq_pool *pool;
	struct mlx5_irq *irq;

	if (mlx5_core_is_sf(dev)) {
		pool = find_sf_irq_pool(irq_table, vecidx, affinity);
		if (!pool)
			/* we don't have IRQs for SFs, use the PF IRQs */
			goto pf_irq;
		if (cpumask_empty(affinity) && !strcmp(pool->name, "mlx5_sf_comp"))
			/* In case an SF user requests an IRQ by vecidx */
			irq = irq_pool_request_vector(pool, vecidx, NULL);
		else
			irq = irq_pool_request_affinity(pool, affinity);
		goto out;
	}
pf_irq:
	pool = irq_table->pf_pool;
	irq = irq_pool_request_vector(pool, vecidx, affinity);
out:
	if (IS_ERR(irq))
		return irq;
	mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
		      irq->irqn, cpumask_pr_args(affinity),
		      kref_read(&irq->kref) / MLX5_EQ_REFS_PER_IRQ);
	return irq;
}
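
/* Illustrative sketch of the request/attach/release life cycle as an EQ
 * creator might use it; variable names and the CPU choice are hypothetical:
 *
 *	cpumask_var_t mask;
 *	struct mlx5_irq *irq;
 *	int err;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_set_cpu(cpumask_local_spread(vecidx, dev->priv.numa_node), mask);
 *	irq = mlx5_irq_request(dev, vecidx, mask);
 *	free_cpumask_var(mask);		// mlx5_irq_request() copied the mask
 *	if (IS_ERR(irq))
 *		return PTR_ERR(irq);
 *	err = mlx5_irq_attach_nb(irq, &eq->nb);
 *	...
 *	mlx5_irq_detach_nb(irq, &eq->nb);
 *	mlx5_irq_release(irq);
 */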

static struct mlx5_irq_pool *
irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
	       u32 min_threshold, u32 max_threshold)
{
	struct mlx5_irq_pool *pool = kvzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return ERR_PTR(-ENOMEM);
	pool->dev = dev;
	xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC);
	pool->xa_num_irqs.min = start;
	pool->xa_num_irqs.max = start + size - 1;
	if (name)
		snprintf(pool->name, MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS,
			 "%s", name);
	pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
	pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
	mutex_init(&pool->lock);
	mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
		      name, size, start);
	return pool;
}

static void irq_pool_free(struct mlx5_irq_pool *pool)
{
	struct mlx5_irq *irq;
	unsigned long index;

	xa_for_each(&pool->irqs, index, irq)
		irq_release(&irq->kref);
	xa_destroy(&pool->irqs);
	kvfree(pool);
}

static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec)
{
	struct mlx5_irq_table *table = dev->priv.irq_table;
	int num_sf_ctrl_by_msix;
	int num_sf_ctrl_by_sfs;
	int num_sf_ctrl;
	int err;

	/* init pf_pool */
	table->pf_pool = irq_pool_alloc(dev, 0, pf_vec, NULL,
					MLX5_EQ_SHARE_IRQ_MIN_COMP,
					MLX5_EQ_SHARE_IRQ_MAX_COMP);
	if (IS_ERR(table->pf_pool))
		return PTR_ERR(table->pf_pool);
	if (!mlx5_sf_max_functions(dev))
		return 0;
	if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
		mlx5_core_err(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
		return 0;
	}

	/* init sf_ctrl_pool */
	num_sf_ctrl_by_msix = DIV_ROUND_UP(sf_vec, MLX5_COMP_EQS_PER_SF);
	num_sf_ctrl_by_sfs = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
					  MLX5_SFS_PER_CTRL_IRQ);
	num_sf_ctrl = min_t(int, num_sf_ctrl_by_msix, num_sf_ctrl_by_sfs);
	num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl);
	table->sf_ctrl_pool = irq_pool_alloc(dev, pf_vec, num_sf_ctrl,
					     "mlx5_sf_ctrl",
					     MLX5_EQ_SHARE_IRQ_MIN_CTRL,
					     MLX5_EQ_SHARE_IRQ_MAX_CTRL);
	if (IS_ERR(table->sf_ctrl_pool)) {
		err = PTR_ERR(table->sf_ctrl_pool);
		goto err_pf;
	}
	/* init sf_comp_pool */
	table->sf_comp_pool = irq_pool_alloc(dev, pf_vec + num_sf_ctrl,
					     sf_vec - num_sf_ctrl, "mlx5_sf_comp",
					     MLX5_EQ_SHARE_IRQ_MIN_COMP,
					     MLX5_EQ_SHARE_IRQ_MAX_COMP);
	if (IS_ERR(table->sf_comp_pool)) {
		err = PTR_ERR(table->sf_comp_pool);
		goto err_sf_ctrl;
	}
	return 0;
err_sf_ctrl:
	irq_pool_free(table->sf_ctrl_pool);
err_pf:
	irq_pool_free(table->pf_pool);
	return err;
}
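
/* Worked example of the vector partitioning above, using hypothetical inputs
 * pf_vec = 9 and sf_vec = 72, and assuming MLX5_COMP_EQS_PER_SF is 8 and
 * mlx5_sf_max_functions() returns 64:
 *
 *	num_sf_ctrl_by_msix = DIV_ROUND_UP(72, 8)  = 9
 *	num_sf_ctrl_by_sfs  = DIV_ROUND_UP(64, 64) = 1
 *	num_sf_ctrl         = min(min(9, 1), 8)    = 1
 *
 * giving pf_pool vectors [0, 8], sf_ctrl_pool vector [9, 9] and sf_comp_pool
 * vectors [10, 80].
 */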

static void irq_pools_destroy(struct mlx5_irq_table *table)
{
	if (table->sf_ctrl_pool) {
		irq_pool_free(table->sf_comp_pool);
		irq_pool_free(table->sf_ctrl_pool);
	}
	irq_pool_free(table->pf_pool);
}

/* irq_table API */

int mlx5_irq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_irq_table *irq_table;

	if (mlx5_core_is_sf(dev))
		return 0;

	irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
	if (!irq_table)
		return -ENOMEM;

	dev->priv.irq_table = irq_table;
	return 0;
}

void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
{
	if (mlx5_core_is_sf(dev))
		return;

	kvfree(dev->priv.irq_table);
}

int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
{
	return table->pf_pool->xa_num_irqs.max - table->pf_pool->xa_num_irqs.min;
}

int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
		      MLX5_CAP_GEN(dev, max_num_eqs) :
		      1 << MLX5_CAP_GEN(dev, log_max_eq);
	int total_vec;
	int pf_vec;
	int err;

	if (mlx5_core_is_sf(dev))
		return 0;

	pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
		 MLX5_IRQ_VEC_COMP_BASE;
	pf_vec = min_t(int, pf_vec, num_eqs);
	if (pf_vec <= MLX5_IRQ_VEC_COMP_BASE)
		return -ENOMEM;

	total_vec = pf_vec;
	if (mlx5_sf_max_functions(dev))
		total_vec += MLX5_IRQ_CTRL_SF_MAX +
			MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);

	total_vec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
					  total_vec, PCI_IRQ_MSIX);
	if (total_vec < 0)
		return total_vec;
	pf_vec = min(pf_vec, total_vec);

	err = irq_pools_init(dev, total_vec - pf_vec, pf_vec);
	if (err)
		pci_free_irq_vectors(dev->pdev);

	return err;
}
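
/* Worked example of the vector budgeting above, with hypothetical values:
 * one port, 8 online CPUs, MLX5_IRQ_VEC_COMP_BASE assumed to be 1, 64 max
 * SFs and MLX5_COMP_EQS_PER_SF assumed to be 8:
 *
 *	pf_vec    = 1 * 8 + 1      = 9   (possibly capped by num_eqs)
 *	total_vec = 9 + 8 + 8 * 64 = 529 (requested from PCI)
 *
 * If pci_alloc_irq_vectors() grants fewer vectors, pf_vec is capped to the
 * grant and whatever remains (total_vec - pf_vec) is handed to the SF pools.
 */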

void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_irq_table *table = dev->priv.irq_table;

	if (mlx5_core_is_sf(dev))
		return;

	/* There are cases where IRQs are still in use when we reach here.
	 * Hence, make sure all the irqs are released.
	 */
	irq_pools_destroy(table);
	pci_free_irq_vectors(dev->pdev);
}

int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
{
	if (table->sf_comp_pool)
		return table->sf_comp_pool->xa_num_irqs.max -
			table->sf_comp_pool->xa_num_irqs.min + 1;
	else
		return mlx5_irq_table_get_num_comp(table);
}

struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_MLX5_SF
	if (mlx5_core_is_sf(dev))
		return dev->priv.parent_mdev->priv.irq_table;
#endif
	return dev->priv.irq_table;
}