// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include "mlx5_core.h"
#include "mlx5_irq.h"
#include "pci_irq.h"
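
/* Bookkeeping helpers: irqs_per_cpu[cpu] tracks how many of the pool's IRQs
 * are bound to each CPU.
 */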
static void cpu_put(struct mlx5_irq_pool *pool, int cpu)
{
	pool->irqs_per_cpu[cpu]--;
}

static void cpu_get(struct mlx5_irq_pool *pool, int cpu)
{
	pool->irqs_per_cpu[cpu]++;
}

/* Gets the least loaded CPU, i.e. the CPU with the fewest IRQs bound to it. */
static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
				const struct cpumask *req_mask)
{
	int best_cpu = -1;
	int cpu;

	for_each_cpu_and(cpu, req_mask, cpu_online_mask) {
		/* CPU has zero IRQs on it. No need to search any more CPUs. */
		if (!pool->irqs_per_cpu[cpu]) {
			best_cpu = cpu;
			break;
		}
		if (best_cpu < 0)
			best_cpu = cpu;
		if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu])
			best_cpu = cpu;
	}
	if (best_cpu == -1) {
		/* There are no online CPUs in req_mask */
		mlx5_core_err(pool->dev, "NO online CPUs in req_mask (%*pbl)\n",
			      cpumask_pr_args(req_mask));
		best_cpu = cpumask_first(cpu_online_mask);
	}
	pool->irqs_per_cpu[best_cpu]++;
	return best_cpu;
}

/* Creating an IRQ from irq_pool */
static struct mlx5_irq *
irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{
	struct irq_affinity_desc auto_desc = {};
	struct mlx5_irq *irq;
	u32 irq_index;
	int err;

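	/* Reserve a free index for the new IRQ in the pool's xarray. */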
	err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
	if (err)
		return ERR_PTR(err);
	if (pool->irqs_per_cpu) {
		if (cpumask_weight(&af_desc->mask) > 1)
			/* if req_mask contains more than one CPU, bind the IRQ
			 * to the least loaded CPU of req_mask
			 */
			cpumask_set_cpu(cpu_get_least_loaded(pool, &af_desc->mask),
					&auto_desc.mask);
		else
			cpu_get(pool, cpumask_first(&af_desc->mask));
	}
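	/* Use the auto-selected CPU mask if one was chosen above. */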
	irq = mlx5_irq_alloc(pool, irq_index,
			     cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
			     NULL);
	if (IS_ERR(irq))
		xa_erase(&pool->irqs, irq_index);
	return irq;
}

/* Looking for the IRQ with the smallest refcount that fits req_mask.
 * If pool is sf_comp_pool, then we are looking for an IRQ with any of the
 * requested CPUs in req_mask.
 * For example: req_mask = 0xf, irq0_mask = 0x10, irq1_mask = 0x1. irq0_mask
 * isn't a subset of req_mask, so we will skip it. irq1_mask is a subset of
 * req_mask, so we don't skip it.
 * If pool is sf_ctrl_pool, then all IRQs have the same mask, so any IRQ will
 * fit. And since a mask is a subset of itself, we will pass the first if below.
 */
static struct mlx5_irq *
irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
{
	int start = pool->xa_num_irqs.min;
	int end = pool->xa_num_irqs.max;
	struct mlx5_irq *irq = NULL;
	struct mlx5_irq *iter;
	int irq_refcount = 0;
	unsigned long index;

	lockdep_assert_held(&pool->lock);
	xa_for_each_range(&pool->irqs, index, iter, start, end) {
		struct cpumask *iter_mask = mlx5_irq_get_affinity_mask(iter);
		int iter_refcount = mlx5_irq_read_locked(iter);

		if (!cpumask_subset(iter_mask, req_mask))
			/* skip IRQs with a mask which is not a subset of req_mask */
			continue;
		if (iter_refcount < pool->min_threshold)
			/* If we found an IRQ with less than min_thres, return it */
			return iter;
		if (!irq || iter_refcount < irq_refcount) {
			/* In case we won't find an IRQ with less than min_thres,
			 * keep a pointer to the least used IRQ
			 */
			irq_refcount = iter_refcount;
			irq = iter;
		}
	}
	return irq;
}

/**
 * mlx5_irq_affinity_request - request an IRQ according to the given mask.
 * @pool: IRQ pool to request from.
 * @af_desc: affinity descriptor for this IRQ.
 *
 * This function returns a pointer to an IRQ, or ERR_PTR in case of error.
 */
struct mlx5_irq *
mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{
	struct mlx5_irq *least_loaded_irq, *new_irq;

	mutex_lock(&pool->lock);
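	/* Prefer sharing: reuse an existing matching IRQ if it is still
	 * below min_threshold.
	 */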
	least_loaded_irq = irq_pool_find_least_loaded(pool, &af_desc->mask);
	if (least_loaded_irq &&
	    mlx5_irq_read_locked(least_loaded_irq) < pool->min_threshold)
		goto out;
	/* We didn't find an IRQ with less than min_thres, try to allocate a new IRQ */
	new_irq = irq_pool_request_irq(pool, af_desc);
	if (IS_ERR(new_irq)) {
		if (!least_loaded_irq) {
			/* We failed to create an IRQ and didn't find an existing one to share */
			mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
				      PTR_ERR(new_irq));
			mutex_unlock(&pool->lock);
			return new_irq;
		}
		/* We failed to create a new IRQ for the requested affinity,
		 * sharing existing IRQ.
		 */
		goto out;
	}
	least_loaded_irq = new_irq;
	goto unlock;
out:
	mlx5_irq_get_locked(least_loaded_irq);
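	/* Emit a debug hint when the chosen IRQ already serves many EQs. */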
	if (mlx5_irq_read_locked(least_loaded_irq) > pool->max_threshold)
		mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
			      pci_irq_vector(pool->dev->pdev,
					     mlx5_irq_get_index(least_loaded_irq)), pool->name,
			      mlx5_irq_read_locked(least_loaded_irq) / MLX5_EQ_REFS_PER_IRQ);
unlock:
	mutex_unlock(&pool->lock);
	return least_loaded_irq;
}
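
/* Drop a reference on @irq; when the last user is gone, decrement the
 * per-CPU IRQ count for the CPU the IRQ was bound to.
 */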
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
	int cpu;

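	/* Remember the bound CPU, then wait for any in-flight handler to finish. */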
	cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
	synchronize_irq(pci_irq_vector(pool->dev->pdev,
				       mlx5_irq_get_index(irq)));
	if (mlx5_irq_put(irq))
		if (pool->irqs_per_cpu)
			cpu_put(pool, cpu);
}
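
/* Minimal usage sketch of the request/release pair (illustrative only; the
 * caller context and chosen mask below are hypothetical, not part of this
 * file):
 *
 *	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
 *	struct irq_affinity_desc af_desc = {};
 *	struct mlx5_irq *irq;
 *
 *	cpumask_copy(&af_desc.mask, cpu_online_mask);
 *	irq = mlx5_irq_affinity_request(pool, &af_desc);
 *	if (IS_ERR(irq))
 *		return PTR_ERR(irq);
 *	...
 *	mlx5_irq_affinity_irq_release(dev, irq);
 */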