// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 HGST, a Western Digital Company.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

#include "core_priv.h"

#include <trace/events/rdma_core.h>

/* Max size for shared CQ, may require tuning */
#define IB_MAX_SHARED_CQ_SZ 4096U

/* # of WCs to poll for with a single call to ib_poll_cq */
#define IB_POLL_BATCH 16
#define IB_POLL_BATCH_DIRECT 8

/* # of WCs to iterate over before yielding */
#define IB_POLL_BUDGET_IRQ 256
#define IB_POLL_BUDGET_WORKQUEUE 65536

#define IB_POLL_FLAGS \
	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)

static const struct dim_cq_moder
rdma_dim_prof[RDMA_DIM_PARAMS_NUM_PROFILES] = {
	{1,   0, 1,  0},
	{1,   0, 4,  0},
	{2,   0, 4,  0},
	{2,   0, 8,  0},
	{4,   0, 8,  0},
	{16,  0, 8,  0},
	{16,  0, 16, 0},
	{32,  0, 16, 0},
	{32,  0, 32, 0},
};

static void ib_cq_rdma_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct ib_cq *cq = dim->priv;

	u16 usec = rdma_dim_prof[dim->profile_ix].usec;
	u16 comps = rdma_dim_prof[dim->profile_ix].comps;

	dim->state = DIM_START_MEASURE;

	trace_cq_modify(cq, comps, usec);
	cq->device->ops.modify_cq(cq, comps, usec);
}

static void rdma_dim_init(struct ib_cq *cq)
{
	struct dim *dim;

	if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim ||
	    cq->poll_ctx == IB_POLL_DIRECT)
		return;

	dim = kzalloc(sizeof(struct dim), GFP_KERNEL);
	if (!dim)
		return;

	dim->state = DIM_START_MEASURE;
	dim->tune_state = DIM_GOING_RIGHT;
	dim->profile_ix = RDMA_DIM_START_PROFILE;
	dim->priv = cq;
	cq->dim = dim;

	INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
}

static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
	int rc;

	rc = ib_poll_cq(cq, num_entries, wc);
	trace_cq_poll(cq, num_entries, rc);
	return rc;
}

static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
			   int batch)
{
	int i, n, completed = 0;

	trace_cq_process(cq);

	/*
	 * budget might be (-1) if the caller does not
	 * want to bound this call, thus we need unsigned
	 * minimum here.
	 */
	while ((n = __poll_cq(cq, min_t(u32, batch,
					budget - completed), wcs)) > 0) {
		for (i = 0; i < n; i++) {
			struct ib_wc *wc = &wcs[i];

			if (wc->wr_cqe)
				wc->wr_cqe->done(cq, wc);
			else
				WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);
		}

		completed += n;

		if (n != batch || (budget != -1 && completed >= budget))
			break;
	}

	return completed;
}
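
/*
 * Illustration only, not part of the original file: __ib_process_cq()
 * above dispatches each completion through wc->wr_cqe->done(). A ULP
 * typically embeds a struct ib_cqe in its own request structure and
 * recovers the request with container_of() in the done callback. The
 * names below (example_request, example_send_done) are hypothetical.
 */
struct example_request {
	struct ib_cqe	cqe;	/* wr->wr_cqe points here when posting */
	void		*buf;
};

static void __maybe_unused example_send_done(struct ib_cq *cq,
					     struct ib_wc *wc)
{
	struct example_request *req =
		container_of(wc->wr_cqe, struct example_request, cqe);

	if (wc->status != IB_WC_SUCCESS)
		pr_err("example send failed: %s\n",
		       ib_wc_status_msg(wc->status));
	kfree(req->buf);
}
/*
 * Before posting, the ULP would set req->cqe.done = example_send_done and
 * wr->wr_cqe = &req->cqe instead of filling in wr->wr_id.
 */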

/**
 * ib_process_cq_direct - process a CQ in caller context
 * @cq: CQ to process
 * @budget: number of CQEs to poll for
 *
 * This function is used to process all outstanding CQ entries.
 * It does not offload CQ processing to a different context and does
 * not ask for completion interrupts from the HCA.
 * Using direct processing on a CQ with a non-IB_POLL_DIRECT poll context
 * may trigger concurrent processing.
 *
 * Note: do not pass -1 as %budget unless it is guaranteed that the number
 * of completions that will be processed is small.
 */
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
	struct ib_wc wcs[IB_POLL_BATCH_DIRECT];

	return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
}
EXPORT_SYMBOL(ib_process_cq_direct);

static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
{
	WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
}

static int ib_poll_handler(struct irq_poll *iop, int budget)
{
	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
	struct dim *dim = cq->dim;
	int completed;

	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
	if (completed < budget) {
		irq_poll_complete(&cq->iop);
		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) {
			trace_cq_reschedule(cq);
			irq_poll_sched(&cq->iop);
		}
	}

	if (dim)
		rdma_dim(dim, completed);

	return completed;
}

static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
{
	trace_cq_schedule(cq);
	irq_poll_sched(&cq->iop);
}

static void ib_cq_poll_work(struct work_struct *work)
{
	struct ib_cq *cq = container_of(work, struct ib_cq, work);
	int completed;

	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
				    IB_POLL_BATCH);
	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
		queue_work(cq->comp_wq, &cq->work);
	else if (cq->dim)
		rdma_dim(cq->dim, completed);
}

static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
	trace_cq_schedule(cq);
	queue_work(cq->comp_wq, &cq->work);
}
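
/*
 * Illustration only, not part of the original file: draining an
 * IB_POLL_DIRECT CQ from caller context. The helper name is hypothetical;
 * the bounded budget avoids the -1 caveat documented above
 * ib_process_cq_direct().
 */
static void __maybe_unused example_drain_direct_cq(struct ib_cq *cq)
{
	/*
	 * Completions are delivered through the wr_cqe->done callbacks;
	 * no completion interrupt is requested, so the caller must poll
	 * often enough on its own.
	 */
	while (ib_process_cq_direct(cq, IB_POLL_BATCH_DIRECT) > 0)
		;
}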

/**
 * __ib_alloc_cq_user - allocate a completion queue
 * @dev: device to allocate the CQ for
 * @private: driver private data, accessible from cq->cq_context
 * @nr_cqe: number of CQEs to allocate
 * @comp_vector: HCA completion vectors for this CQ
 * @poll_ctx: context to poll the CQ from
 * @caller: module owner name
 * @udata: Valid user data or NULL for kernel object
 *
 * This is the proper interface to allocate a CQ for in-kernel users. A
 * CQ allocated with this interface will automatically be polled from the
 * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
 * to use this CQ abstraction.
 */
struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
				 int nr_cqe, int comp_vector,
				 enum ib_poll_context poll_ctx,
				 const char *caller, struct ib_udata *udata)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe		= nr_cqe,
		.comp_vector	= comp_vector,
	};
	struct ib_cq *cq;
	int ret = -ENOMEM;

	cq = rdma_zalloc_drv_obj(dev, ib_cq);
	if (!cq)
		return ERR_PTR(ret);

	cq->device = dev;
	cq->cq_context = private;
	cq->poll_ctx = poll_ctx;
	atomic_set(&cq->usecnt, 0);
	cq->comp_vector = comp_vector;

	cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
	if (!cq->wc)
		goto out_free_cq;

	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_set_task(&cq->res, caller);

	ret = dev->ops.create_cq(cq, &cq_attr, NULL);
	if (ret)
		goto out_free_wc;

	rdma_restrack_kadd(&cq->res);

	rdma_dim_init(cq);

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		cq->comp_handler = ib_cq_completion_direct;
		break;
	case IB_POLL_SOFTIRQ:
		cq->comp_handler = ib_cq_completion_softirq;

		irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	case IB_POLL_WORKQUEUE:
	case IB_POLL_UNBOUND_WORKQUEUE:
		cq->comp_handler = ib_cq_completion_workqueue;
		INIT_WORK(&cq->work, ib_cq_poll_work);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
				ib_comp_wq : ib_comp_unbound_wq;
		break;
	default:
		ret = -EINVAL;
		goto out_destroy_cq;
	}

	trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx);
	return cq;

out_destroy_cq:
	rdma_restrack_del(&cq->res);
	cq->device->ops.destroy_cq(cq, udata);
out_free_wc:
	kfree(cq->wc);
out_free_cq:
	kfree(cq);
	trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(__ib_alloc_cq_user);

/**
 * __ib_alloc_cq_any - allocate a completion queue
 * @dev: device to allocate the CQ for
 * @private: driver private data, accessible from cq->cq_context
 * @nr_cqe: number of CQEs to allocate
 * @poll_ctx: context to poll the CQ from
 * @caller: module owner name
 *
 * Attempt to spread ULP Completion Queues over each device's interrupt
 * vectors. A simple best-effort mechanism is used.
 */
struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
				int nr_cqe, enum ib_poll_context poll_ctx,
				const char *caller)
{
	static atomic_t counter;
	int comp_vector = 0;

	if (dev->num_comp_vectors > 1)
		comp_vector =
			atomic_inc_return(&counter) %
			min_t(int, dev->num_comp_vectors, num_online_cpus());

	return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
				  caller, NULL);
}
EXPORT_SYMBOL(__ib_alloc_cq_any);
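
/*
 * Illustration only, not part of the original file: typical in-kernel
 * allocation through the ib_alloc_cq_any() wrapper, which supplies the
 * caller name and lets the core pick a completion vector. The function
 * name and the CQE count are hypothetical.
 */
static struct ib_cq * __maybe_unused example_create_cq(struct ib_device *dev,
						       void *ulp_ctx)
{
	/* 128 CQEs, polled from the shared completion workqueue */
	return ib_alloc_cq_any(dev, ulp_ctx, 128, IB_POLL_WORKQUEUE);
}
/* The CQ is later released with ib_free_cq(); see ib_free_cq_user() below. */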

/**
 * ib_free_cq_user - free a completion queue
 * @cq: completion queue to free
 * @udata: User data or NULL for kernel object
 */
void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{
	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
		return;
	if (WARN_ON_ONCE(cq->cqe_used))
		return;

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		break;
	case IB_POLL_SOFTIRQ:
		irq_poll_disable(&cq->iop);
		break;
	case IB_POLL_WORKQUEUE:
	case IB_POLL_UNBOUND_WORKQUEUE:
		cancel_work_sync(&cq->work);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	trace_cq_free(cq);
	rdma_restrack_del(&cq->res);
	cq->device->ops.destroy_cq(cq, udata);
	if (cq->dim)
		cancel_work_sync(&cq->dim->work);
	kfree(cq->dim);
	kfree(cq->wc);
	kfree(cq);
}
EXPORT_SYMBOL(ib_free_cq_user);

void ib_cq_pool_init(struct ib_device *dev)
{
	unsigned int i;

	spin_lock_init(&dev->cq_pools_lock);
	for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++)
		INIT_LIST_HEAD(&dev->cq_pools[i]);
}

void ib_cq_pool_destroy(struct ib_device *dev)
{
	struct ib_cq *cq, *n;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++) {
		list_for_each_entry_safe(cq, n, &dev->cq_pools[i],
					 pool_entry) {
			WARN_ON(cq->cqe_used);
			cq->shared = false;
			ib_free_cq(cq);
		}
	}
}

static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
			enum ib_poll_context poll_ctx)
{
	LIST_HEAD(tmp_list);
	unsigned int nr_cqs, i;
	struct ib_cq *cq;
	int ret;

	if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
		WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE);
		return -EINVAL;
	}

	/*
	 * Allocate at least as many CQEs as requested, and otherwise
	 * a reasonable batch size so that we can share CQs between
	 * multiple users instead of allocating a larger number of CQs.
	 */
	nr_cqes = min_t(unsigned int, dev->attrs.max_cqe,
			max(nr_cqes, IB_MAX_SHARED_CQ_SZ));
	nr_cqs = min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
	for (i = 0; i < nr_cqs; i++) {
		cq = ib_alloc_cq(dev, NULL, nr_cqes, i, poll_ctx);
		if (IS_ERR(cq)) {
			ret = PTR_ERR(cq);
			goto out_free_cqs;
		}
		cq->shared = true;
		list_add_tail(&cq->pool_entry, &tmp_list);
	}

	spin_lock_irq(&dev->cq_pools_lock);
	list_splice(&tmp_list, &dev->cq_pools[poll_ctx]);
	spin_unlock_irq(&dev->cq_pools_lock);

	return 0;

out_free_cqs:
	list_for_each_entry(cq, &tmp_list, pool_entry) {
		cq->shared = false;
		ib_free_cq(cq);
	}
	return ret;
}
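
/*
 * Illustration only, not part of the original file: a worked example of
 * the sizing rule in ib_alloc_cqs() above. With a hypothetical device
 * limit of attrs.max_cqe = 65536, a request for 256 CQEs is rounded up
 * to IB_MAX_SHARED_CQ_SZ (4096) per pool CQ so that many users can share
 * it, while a request for 8192 stays at 8192; a device limit of
 * attrs.max_cqe = 2048 would clamp either case down to 2048.
 */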

/**
 * ib_cq_pool_get() - Find the least used completion queue that matches
 *   a given cpu hint (or least used for wild card affinity) and fits
 *   nr_cqe.
 * @dev: rdma device
 * @nr_cqe: number of needed cqe entries
 * @comp_vector_hint: completion vector hint (-1) for the driver to assign
 *   a comp vector based on internal counter
 * @poll_ctx: cq polling context
 *
 * Finds a cq that satisfies @comp_vector_hint and @nr_cqe requirements and
 * claims entries in it for us. In case there is no available cq, allocate
 * a new cq with the requirements and add it to the device pool.
 * IB_POLL_DIRECT cannot be used for shared cqs so it is not a valid value
 * for @poll_ctx.
 */
struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
			     int comp_vector_hint,
			     enum ib_poll_context poll_ctx)
{
	static unsigned int default_comp_vector;
	unsigned int vector, num_comp_vectors;
	struct ib_cq *cq, *found = NULL;
	int ret;

	if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
		WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE);
		return ERR_PTR(-EINVAL);
	}

	num_comp_vectors =
		min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
	/* Project the affinity to the device completion vector range */
	if (comp_vector_hint < 0) {
		comp_vector_hint =
			(READ_ONCE(default_comp_vector) + 1) % num_comp_vectors;
		WRITE_ONCE(default_comp_vector, comp_vector_hint);
	}
	vector = comp_vector_hint % num_comp_vectors;

	/*
	 * Find the least used CQ with correct affinity and
	 * enough free CQ entries
	 */
	while (!found) {
		spin_lock_irq(&dev->cq_pools_lock);
		list_for_each_entry(cq, &dev->cq_pools[poll_ctx],
				    pool_entry) {
			/*
			 * Check to see if we have found a CQ with the
			 * correct completion vector
			 */
			if (vector != cq->comp_vector)
				continue;
			if (cq->cqe_used + nr_cqe > cq->cqe)
				continue;
			found = cq;
			break;
		}

		if (found) {
			found->cqe_used += nr_cqe;
			spin_unlock_irq(&dev->cq_pools_lock);

			return found;
		}
		spin_unlock_irq(&dev->cq_pools_lock);

		/*
		 * Didn't find a match or ran out of CQs in the device
		 * pool, allocate a new array of CQs.
		 */
		ret = ib_alloc_cqs(dev, nr_cqe, poll_ctx);
		if (ret)
			return ERR_PTR(ret);
	}

	return found;
}
EXPORT_SYMBOL(ib_cq_pool_get);

/**
 * ib_cq_pool_put - Return a CQ taken from a shared pool.
 * @cq: The CQ to return.
 * @nr_cqe: The max number of cqes that the user had requested.
 */
void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe)
{
	if (WARN_ON_ONCE(nr_cqe > cq->cqe_used))
		return;

	spin_lock_irq(&cq->device->cq_pools_lock);
	cq->cqe_used -= nr_cqe;
	spin_unlock_irq(&cq->device->cq_pools_lock);
}
EXPORT_SYMBOL(ib_cq_pool_put);
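
/*
 * Illustration only, not part of the original file: a ULP borrowing a CQ
 * from the shared pool for a queue pair and returning it on teardown.
 * The function names and the 2 * queue_depth sizing are hypothetical.
 */
static struct ib_cq * __maybe_unused
example_get_pool_cq(struct ib_device *dev, unsigned int queue_depth)
{
	/* Room for one send and one recv completion per queue slot */
	return ib_cq_pool_get(dev, 2 * queue_depth, -1, IB_POLL_WORKQUEUE);
}

static void __maybe_unused example_put_pool_cq(struct ib_cq *cq,
					       unsigned int queue_depth)
{
	/* Must pass the same CQE count that was claimed by the _get() */
	ib_cq_pool_put(cq, 2 * queue_depth);
}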