// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include "mlx5_core.h"
#include "mlx5_irq.h"
#include "pci_irq.h"

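/* Bookkeeping helpers for pool->irqs_per_cpu, which tracks how many IRQs of
 * this pool are currently bound to each CPU.
 */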
static void cpu_put(struct mlx5_irq_pool *pool, int cpu)
{
	pool->irqs_per_cpu[cpu]--;
}

static void cpu_get(struct mlx5_irq_pool *pool, int cpu)
{
	pool->irqs_per_cpu[cpu]++;
}

/* Gets the least loaded CPU, i.e. the CPU with the fewest IRQs bound to it */
static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
				const struct cpumask *req_mask)
{
	int best_cpu = -1;
	int cpu;

	for_each_cpu_and(cpu, req_mask, cpu_online_mask) {
		/* CPU has zero IRQs on it. No need to search any more CPUs. */
		if (!pool->irqs_per_cpu[cpu]) {
			best_cpu = cpu;
			break;
		}
		if (best_cpu < 0)
			best_cpu = cpu;
		if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu])
			best_cpu = cpu;
	}
	if (best_cpu == -1) {
		/* There are no online CPUs in req_mask */
		mlx5_core_err(pool->dev, "NO online CPUs in req_mask (%*pbl)\n",
			      cpumask_pr_args(req_mask));
		best_cpu = cpumask_first(cpu_online_mask);
	}
	/* The chosen CPU now has one more IRQ bound to it */
	pool->irqs_per_cpu[best_cpu]++;
	return best_cpu;
}

/* Creating an IRQ from irq_pool */
static struct mlx5_irq *
irq_pool_request_irq(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
{
	cpumask_var_t auto_mask;
	struct mlx5_irq *irq;
	u32 irq_index;
	int err;

	if (!zalloc_cpumask_var(&auto_mask, GFP_KERNEL))
		return ERR_PTR(-ENOMEM);
	err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
	if (err) {
		free_cpumask_var(auto_mask);
		return ERR_PTR(err);
	}
	if (pool->irqs_per_cpu) {
		if (cpumask_weight(req_mask) > 1)
			/* if req_mask contains more than one CPU, set the least loaded CPU
			 * of req_mask
			 */
			cpumask_set_cpu(cpu_get_least_loaded(pool, req_mask), auto_mask);
		else
			cpu_get(pool, cpumask_first(req_mask));
	}
	irq = mlx5_irq_alloc(pool, irq_index, cpumask_empty(auto_mask) ? req_mask : auto_mask);
	free_cpumask_var(auto_mask);
	return irq;
}

/* Looking for the IRQ with the smallest refcount that fits req_mask.
 * If pool is sf_comp_pool, then we are looking for an IRQ with any of the
 * requested CPUs in req_mask.
 * For example: req_mask = 0xf, irq0_mask = 0x10, irq1_mask = 0x1. irq0_mask
 * isn't a subset of req_mask, so we skip it. irq1_mask is a subset of
 * req_mask, so we don't skip it.
 * If pool is sf_ctrl_pool, then all IRQs have the same mask, so any IRQ will
 * fit. And since a mask is a subset of itself, we will pass the first if below.
 */
static struct mlx5_irq *
irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
{
	int start = pool->xa_num_irqs.min;
	int end = pool->xa_num_irqs.max;
	struct mlx5_irq *irq = NULL;
	struct mlx5_irq *iter;
	int irq_refcount = 0;
	unsigned long index;

	lockdep_assert_held(&pool->lock);
	xa_for_each_range(&pool->irqs, index, iter, start, end) {
		struct cpumask *iter_mask = mlx5_irq_get_affinity_mask(iter);
		int iter_refcount = mlx5_irq_read_locked(iter);

		if (!cpumask_subset(iter_mask, req_mask))
			/* skip IRQs with a mask which is not a subset of req_mask */
			continue;
		if (iter_refcount < pool->min_threshold)
			/* If we found an IRQ with less than min_thres, return it */
			return iter;
		if (!irq || iter_refcount < irq_refcount) {
			/* In case we won't find an IRQ with less than min_thres,
			 * keep a pointer to the least used IRQ
			 */
			irq_refcount = iter_refcount;
			irq = iter;
		}
	}
	return irq;
}

/**
 * mlx5_irq_affinity_request - request an IRQ according to the given mask.
 * @pool: IRQ pool to request from.
 * @req_mask: cpumask requested for this IRQ.
 *
 * This function returns a pointer to an IRQ, or ERR_PTR in case of error.
 */
struct mlx5_irq *
mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
{
	struct mlx5_irq *least_loaded_irq, *new_irq;

	mutex_lock(&pool->lock);
	least_loaded_irq = irq_pool_find_least_loaded(pool, req_mask);
	if (least_loaded_irq &&
	    mlx5_irq_read_locked(least_loaded_irq) < pool->min_threshold)
		goto out;
	/* We didn't find an IRQ with less than min_thres, try to allocate a new IRQ */
	new_irq = irq_pool_request_irq(pool, req_mask);
	if (IS_ERR(new_irq)) {
		if (!least_loaded_irq) {
			/* We failed to create an IRQ and we didn't find an IRQ */
			mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
				      PTR_ERR(new_irq));
			mutex_unlock(&pool->lock);
			return new_irq;
		}
		/* We failed to create a new IRQ for the requested affinity,
		 * sharing an existing IRQ instead.
		 */
		goto out;
	}
	least_loaded_irq = new_irq;
	goto unlock;
out:
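	/* Sharing an existing IRQ: take another reference and note, at debug
	 * level, when its usage crosses pool->max_threshold.
	 */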
	mlx5_irq_get_locked(least_loaded_irq);
	if (mlx5_irq_read_locked(least_loaded_irq) > pool->max_threshold)
		mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
			      pci_irq_vector(pool->dev->pdev,
					     mlx5_irq_get_index(least_loaded_irq)), pool->name,
			      mlx5_irq_read_locked(least_loaded_irq) / MLX5_EQ_REFS_PER_IRQ);
unlock:
	mutex_unlock(&pool->lock);
	return least_loaded_irq;
}

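/**
 * mlx5_irq_affinity_irqs_release - release IRQs obtained through the affinity pool.
 * @dev: mlx5 device that releases the IRQs.
 * @irqs: array of IRQs to release.
 * @num_irqs: number of IRQs in the array.
 *
 * For each IRQ, wait for any in-flight handler to finish, then drop the
 * reference taken when the IRQ was requested; when the last reference is
 * dropped, the per-CPU IRQ accounting is updated as well.
 */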
void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs,
				    int num_irqs)
{
	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
	int i;

	for (i = 0; i < num_irqs; i++) {
		int cpu = cpumask_first(mlx5_irq_get_affinity_mask(irqs[i]));

		synchronize_irq(pci_irq_vector(pool->dev->pdev,
					       mlx5_irq_get_index(irqs[i])));
		if (mlx5_irq_put(irqs[i]))
			if (pool->irqs_per_cpu)
				cpu_put(pool, cpu);
	}
}

/**
 * mlx5_irq_affinity_irqs_request_auto - request one or more IRQs for mlx5 device.
 * @dev: mlx5 device that is requesting the IRQs.
 * @nirqs: number of IRQs to request.
 * @irqs: an output array of IRQ pointers.
 *
 * Each IRQ is bound to at most 1 CPU.
 * This function requests IRQs according to the default assignment policy:
 * - in each iteration, request the least loaded IRQ which is not bound to any
 *   CPU of the previously requested IRQs.
 *
 * On success, this function returns the number of requested IRQs (which might
 * be smaller than @nirqs); otherwise, it returns a negative error code.
 */
int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
					struct mlx5_irq **irqs)
{
	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
	cpumask_var_t req_mask;
	struct mlx5_irq *irq;
	int i = 0;

	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(req_mask, cpu_online_mask);
	for (i = 0; i < nirqs; i++) {
		if (mlx5_irq_pool_is_sf_pool(pool))
			irq = mlx5_irq_affinity_request(pool, req_mask);
		else
			/* In case the SF pool doesn't exist, fall back to the PF IRQs.
			 * The PF IRQs are already allocated and bound to a CPU
			 * at this point. Hence, only an index is needed.
			 */
			irq = mlx5_irq_request(dev, i, NULL);
		if (IS_ERR(irq))
			break;
		irqs[i] = irq;
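		/* Exclude the CPU this IRQ is bound to so the next iteration
		 * picks an IRQ on a different CPU.
		 */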
		cpumask_clear_cpu(cpumask_first(mlx5_irq_get_affinity_mask(irq)), req_mask);
		mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
			      pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
			      cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
			      mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
	}
	free_cpumask_var(req_mask);
	if (!i)
		return PTR_ERR(irq);
	return i;
}