// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include "mlx5_core.h"
#include "mlx5_irq.h"
#include "pci_irq.h"

/* Drop one IRQ from the per-CPU load accounting */
static void cpu_put(struct mlx5_irq_pool *pool, int cpu)
{
	pool->irqs_per_cpu[cpu]--;
}

/* Add one IRQ to the per-CPU load accounting */
static void cpu_get(struct mlx5_irq_pool *pool, int cpu)
{
	pool->irqs_per_cpu[cpu]++;
}

/* Gets the least loaded CPU, i.e. the CPU with the fewest IRQs bound to it */
static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
				const struct cpumask *req_mask)
{
	int best_cpu = -1;
	int cpu;

	for_each_cpu_and(cpu, req_mask, cpu_online_mask) {
		/* CPU has zero IRQs on it. No need to search any more CPUs. */
		if (!pool->irqs_per_cpu[cpu]) {
			best_cpu = cpu;
			break;
		}
		if (best_cpu < 0)
			best_cpu = cpu;
		if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu])
			best_cpu = cpu;
	}
	if (best_cpu == -1) {
		/* There are no online CPUs in req_mask */
		mlx5_core_err(pool->dev, "NO online CPUs in req_mask (%*pbl)\n",
			      cpumask_pr_args(req_mask));
		best_cpu = cpumask_first(cpu_online_mask);
	}
	pool->irqs_per_cpu[best_cpu]++;
	return best_cpu;
}

/* Creating an IRQ from irq_pool */
static struct mlx5_irq *
irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{
	struct irq_affinity_desc auto_desc = {};
	u32 irq_index;
	int err;

	err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
	if (err)
		return ERR_PTR(err);
	if (pool->irqs_per_cpu) {
		if (cpumask_weight(&af_desc->mask) > 1)
			/* If af_desc->mask contains more than one CPU, bind the
			 * IRQ to the least loaded CPU of that mask.
			 */
			cpumask_set_cpu(cpu_get_least_loaded(pool, &af_desc->mask),
					&auto_desc.mask);
		else
			cpu_get(pool, cpumask_first(&af_desc->mask));
	}
	return mlx5_irq_alloc(pool, irq_index,
			      cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
			      NULL);
}
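/* Worked example (illustrative sketch, not compiled code): how a caller
 * inside this file might drive irq_pool_request_irq(). The pool pointer and
 * the CPU numbers are hypothetical, chosen only to show how a multi-CPU mask
 * is narrowed down to the single least loaded CPU:
 *
 *	struct irq_affinity_desc af_desc = { .is_managed = 1 };
 *	struct mlx5_irq *irq;
 *
 *	cpumask_set_cpu(2, &af_desc.mask);
 *	cpumask_set_cpu(3, &af_desc.mask);
 *	irq = irq_pool_request_irq(pool, &af_desc);
 *	if (IS_ERR(irq))
 *		return PTR_ERR(irq);
 *
 * With irqs_per_cpu[2] == 4 and irqs_per_cpu[3] == 1, cpu_get_least_loaded()
 * picks CPU 3, so the new IRQ is bound to CPU 3 alone and irqs_per_cpu[3]
 * becomes 2.
 */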
/* Looking for the IRQ with the smallest refcount that fits req_mask.
 * If pool is sf_comp_pool, then we are looking for an IRQ whose mask falls
 * entirely within req_mask.
 * For example: req_mask = 0xf, irq0_mask = 0x10, irq1_mask = 0x1. irq0_mask
 * isn't a subset of req_mask, so we will skip it. irq1_mask is a subset of
 * req_mask, so we don't skip it.
 * If pool is sf_ctrl_pool, then all IRQs have the same mask, so any IRQ will
 * fit. And since a mask is a subset of itself, we will pass the first if
 * below.
 */
static struct mlx5_irq *
irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
{
	int start = pool->xa_num_irqs.min;
	int end = pool->xa_num_irqs.max;
	struct mlx5_irq *irq = NULL;
	struct mlx5_irq *iter;
	int irq_refcount = 0;
	unsigned long index;

	lockdep_assert_held(&pool->lock);
	xa_for_each_range(&pool->irqs, index, iter, start, end) {
		struct cpumask *iter_mask = mlx5_irq_get_affinity_mask(iter);
		int iter_refcount = mlx5_irq_read_locked(iter);

		if (!cpumask_subset(iter_mask, req_mask))
			/* skip IRQs with a mask which is not a subset of req_mask */
			continue;
		if (iter_refcount < pool->min_threshold)
			/* If we found an IRQ with less than min_thres, return it */
			return iter;
		if (!irq || iter_refcount < irq_refcount) {
			/* In case we don't find an IRQ with less than min_thres,
			 * keep a pointer to the least used IRQ
			 */
			irq_refcount = iter_refcount;
			irq = iter;
		}
	}
	return irq;
}

/**
 * mlx5_irq_affinity_request - request an IRQ according to the given mask.
 * @pool: IRQ pool to request from.
 * @af_desc: affinity descriptor for this IRQ.
 *
 * This function returns a pointer to an IRQ, or ERR_PTR in case of error.
 */
struct mlx5_irq *
mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{
	struct mlx5_irq *least_loaded_irq, *new_irq;

	mutex_lock(&pool->lock);
	least_loaded_irq = irq_pool_find_least_loaded(pool, &af_desc->mask);
	if (least_loaded_irq &&
	    mlx5_irq_read_locked(least_loaded_irq) < pool->min_threshold)
		goto out;
	/* We didn't find an IRQ with less than min_thres, try to allocate a new IRQ */
	new_irq = irq_pool_request_irq(pool, af_desc);
	if (IS_ERR(new_irq)) {
		if (!least_loaded_irq) {
			/* We failed to create an IRQ and we didn't find any existing IRQ */
			mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
				      PTR_ERR(new_irq));
			mutex_unlock(&pool->lock);
			return new_irq;
		}
		/* We failed to create a new IRQ for the requested affinity,
		 * so share an existing IRQ.
		 */
		goto out;
	}
	least_loaded_irq = new_irq;
	goto unlock;
out:
	mlx5_irq_get_locked(least_loaded_irq);
	if (mlx5_irq_read_locked(least_loaded_irq) > pool->max_threshold)
		mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
			      pci_irq_vector(pool->dev->pdev,
					     mlx5_irq_get_index(least_loaded_irq)), pool->name,
			      mlx5_irq_read_locked(least_loaded_irq) / MLX5_EQ_REFS_PER_IRQ);
unlock:
	mutex_unlock(&pool->lock);
	return least_loaded_irq;
}
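/* Usage sketch (illustrative only, not compiled code): how an SF caller might
 * request an IRQ pinned to one CPU and release it later. sf_dev and the CPU
 * number are hypothetical; mlx5_irq_affinity_irqs_release() is defined below.
 *
 *	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(sf_dev);
 *	struct irq_affinity_desc af_desc = { .is_managed = 1 };
 *	struct mlx5_irq *irq;
 *
 *	cpumask_set_cpu(0, &af_desc.mask);
 *	irq = mlx5_irq_affinity_request(pool, &af_desc);
 *	if (IS_ERR(irq))
 *		return PTR_ERR(irq);
 *	...
 *	mlx5_irq_affinity_irqs_release(sf_dev, &irq, 1);
 */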
/* Sync and release a set of IRQs, dropping the per-CPU load accounting */
void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs,
				    int num_irqs)
{
	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
	int i;

	for (i = 0; i < num_irqs; i++) {
		int cpu = cpumask_first(mlx5_irq_get_affinity_mask(irqs[i]));

		synchronize_irq(pci_irq_vector(pool->dev->pdev,
					       mlx5_irq_get_index(irqs[i])));
		if (mlx5_irq_put(irqs[i]))
			if (pool->irqs_per_cpu)
				cpu_put(pool, cpu);
	}
}

/**
 * mlx5_irq_affinity_irqs_request_auto - request one or more IRQs for mlx5 device.
 * @dev: mlx5 device that is requesting the IRQs.
 * @nirqs: number of IRQs to request.
 * @irqs: an output array of IRQ pointers.
 *
 * Each IRQ is bound to at most 1 CPU.
 * This function requests IRQs according to the default assignment policy:
 * - in each iteration, request the least loaded IRQ which is not bound to any
 *   CPU of the previously requested IRQs.
 *
 * This function returns the number of IRQs requested (which might be smaller
 * than @nirqs) on success, or a negative error code in case of an error.
 */
int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
					struct mlx5_irq **irqs)
{
	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
	struct irq_affinity_desc af_desc = {};
	struct mlx5_irq *irq = ERR_PTR(-EINVAL); /* guard against nirqs <= 0 */
	int i = 0;

	af_desc.is_managed = 1;
	cpumask_copy(&af_desc.mask, cpu_online_mask);
	for (i = 0; i < nirqs; i++) {
		if (mlx5_irq_pool_is_sf_pool(pool))
			irq = mlx5_irq_affinity_request(pool, &af_desc);
		else
			/* In case the SF pool doesn't exist, fall back to the PF
			 * IRQs. The PF IRQs are already allocated and bound to a
			 * CPU at this point. Hence, only an index is needed.
			 */
			irq = mlx5_irq_request(dev, i, NULL, NULL);
		if (IS_ERR(irq))
			break;
		irqs[i] = irq;
		cpumask_clear_cpu(cpumask_first(mlx5_irq_get_affinity_mask(irq)), &af_desc.mask);
		mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
			      pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
			      cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
			      mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
	}
	if (!i)
		return PTR_ERR(irq);
	return i;
}
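/* Usage sketch (illustrative only, not compiled code): requesting a batch of
 * auto-assigned IRQs and releasing them. my_dev, MY_MAX_IRQS and the error
 * handling are hypothetical.
 *
 *	struct mlx5_irq *irqs[MY_MAX_IRQS];
 *	int got;
 *
 *	got = mlx5_irq_affinity_irqs_request_auto(my_dev, MY_MAX_IRQS, irqs);
 *	if (got < 0)
 *		return got;
 *	...
 *	mlx5_irq_affinity_irqs_release(my_dev, irqs, got);
 *
 * On success, got may be smaller than MY_MAX_IRQS; irqs[0] through
 * irqs[got - 1] are valid, and each one is bound to a distinct online CPU.
 */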