1 /* 2 * Broadcom NetXtreme-E RoCE driver. 3 * 4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term 5 * Broadcom refers to Broadcom Limited and/or its subsidiaries. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in 21 * the documentation and/or other materials provided with the 22 * distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 * 36 * Description: Fast Path Operators 37 */ 38 39 #define dev_fmt(fmt) "QPLIB: " fmt 40 41 #include <linux/interrupt.h> 42 #include <linux/spinlock.h> 43 #include <linux/sched.h> 44 #include <linux/slab.h> 45 #include <linux/pci.h> 46 #include <linux/delay.h> 47 #include <linux/prefetch.h> 48 #include <linux/if_ether.h> 49 #include <rdma/ib_mad.h> 50 51 #include "roce_hsi.h" 52 53 #include "qplib_res.h" 54 #include "qplib_rcfw.h" 55 #include "qplib_sp.h" 56 #include "qplib_fp.h" 57 58 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp); 59 60 static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp) 61 { 62 qp->sq.condition = false; 63 qp->sq.send_phantom = false; 64 qp->sq.single = false; 65 } 66 67 /* Flush list */ 68 static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) 69 { 70 struct bnxt_qplib_cq *scq, *rcq; 71 72 scq = qp->scq; 73 rcq = qp->rcq; 74 75 if (!qp->sq.flushed) { 76 dev_dbg(&scq->hwq.pdev->dev, 77 "FP: Adding to SQ Flush list = %p\n", qp); 78 bnxt_qplib_cancel_phantom_processing(qp); 79 list_add_tail(&qp->sq_flush, &scq->sqf_head); 80 qp->sq.flushed = true; 81 } 82 if (!qp->srq) { 83 if (!qp->rq.flushed) { 84 dev_dbg(&rcq->hwq.pdev->dev, 85 "FP: Adding to RQ Flush list = %p\n", qp); 86 list_add_tail(&qp->rq_flush, &rcq->rqf_head); 87 qp->rq.flushed = true; 88 } 89 } 90 } 91 92 static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp, 93 unsigned long *flags) 94 __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock) 95 { 96 spin_lock_irqsave(&qp->scq->flush_lock, *flags); 97 if (qp->scq == qp->rcq) 98 __acquire(&qp->rcq->flush_lock); 99 else 100 spin_lock(&qp->rcq->flush_lock); 101 } 102 103 static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp, 104 unsigned long *flags) 105 __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock) 106 { 107 if (qp->scq == qp->rcq) 108 __release(&qp->rcq->flush_lock); 109 else 110 spin_unlock(&qp->rcq->flush_lock); 111 spin_unlock_irqrestore(&qp->scq->flush_lock, *flags); 112 } 113 114 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) 115 { 116 unsigned long flags; 117 118 bnxt_qplib_acquire_cq_flush_locks(qp, &flags); 119 __bnxt_qplib_add_flush_qp(qp); 120 bnxt_qplib_release_cq_flush_locks(qp, &flags); 121 } 122 123 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) 124 { 125 if (qp->sq.flushed) { 126 qp->sq.flushed = false; 127 list_del(&qp->sq_flush); 128 } 129 if (!qp->srq) { 130 if (qp->rq.flushed) { 131 qp->rq.flushed = false; 132 list_del(&qp->rq_flush); 133 } 134 } 135 } 136 137 void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp) 138 { 139 unsigned long flags; 140 141 bnxt_qplib_acquire_cq_flush_locks(qp, &flags); 142 __clean_cq(qp->scq, (u64)(unsigned long)qp); 143 qp->sq.hwq.prod = 0; 144 qp->sq.hwq.cons = 0; 145 __clean_cq(qp->rcq, (u64)(unsigned long)qp); 146 qp->rq.hwq.prod = 0; 147 qp->rq.hwq.cons = 0; 148 149 __bnxt_qplib_del_flush_qp(qp); 150 bnxt_qplib_release_cq_flush_locks(qp, &flags); 151 } 152 153 static void bnxt_qpn_cqn_sched_task(struct work_struct *work) 154 { 155 struct bnxt_qplib_nq_work *nq_work = 156 container_of(work, struct bnxt_qplib_nq_work, work); 157 158 struct bnxt_qplib_cq *cq = nq_work->cq; 159 struct bnxt_qplib_nq *nq = nq_work->nq; 160 161 if (cq && nq) { 162 spin_lock_bh(&cq->compl_lock); 163 if (atomic_read(&cq->arm_state) && nq->cqn_handler) { 164 dev_dbg(&nq->pdev->dev, 165 "%s:Trigger cq = %p event nq = %p\n", 166 __func__, cq, nq); 167 nq->cqn_handler(nq, cq); 168 } 169 spin_unlock_bh(&cq->compl_lock); 170 } 171 kfree(nq_work); 172 } 173 174 static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res, 175 struct bnxt_qplib_qp *qp) 176 { 177 struct bnxt_qplib_q *rq = &qp->rq; 178 struct bnxt_qplib_q *sq = &qp->sq; 179 180 if (qp->rq_hdr_buf) 181 dma_free_coherent(&res->pdev->dev, 182 rq->max_wqe * qp->rq_hdr_buf_size, 183 qp->rq_hdr_buf, qp->rq_hdr_buf_map); 184 if (qp->sq_hdr_buf) 185 dma_free_coherent(&res->pdev->dev, 186 sq->max_wqe * qp->sq_hdr_buf_size, 187 qp->sq_hdr_buf, qp->sq_hdr_buf_map); 188 qp->rq_hdr_buf = NULL; 189 qp->sq_hdr_buf = NULL; 190 qp->rq_hdr_buf_map = 0; 191 qp->sq_hdr_buf_map = 0; 192 qp->sq_hdr_buf_size = 0; 193 qp->rq_hdr_buf_size = 0; 194 } 195 196 static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res, 197 struct bnxt_qplib_qp *qp) 198 { 199 struct bnxt_qplib_q *rq = &qp->rq; 200 struct bnxt_qplib_q *sq = &qp->sq; 201 int rc = 0; 202 203 if (qp->sq_hdr_buf_size && sq->max_wqe) { 204 qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev, 205 sq->max_wqe * qp->sq_hdr_buf_size, 206 &qp->sq_hdr_buf_map, GFP_KERNEL); 207 if (!qp->sq_hdr_buf) { 208 rc = -ENOMEM; 209 dev_err(&res->pdev->dev, 210 "Failed to create sq_hdr_buf\n"); 211 goto fail; 212 } 213 } 214 215 if (qp->rq_hdr_buf_size && rq->max_wqe) { 216 qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev, 217 rq->max_wqe * 218 qp->rq_hdr_buf_size, 219 &qp->rq_hdr_buf_map, 220 GFP_KERNEL); 221 if (!qp->rq_hdr_buf) { 222 rc = -ENOMEM; 223 dev_err(&res->pdev->dev, 224 "Failed to create rq_hdr_buf\n"); 225 goto fail; 226 } 227 } 228 return 0; 229 230 fail: 231 bnxt_qplib_free_qp_hdr_buf(res, qp); 232 return rc; 233 } 234 235 static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq) 236 { 237 struct bnxt_qplib_hwq *hwq = &nq->hwq; 238 struct nq_base *nqe, **nq_ptr; 239 int budget = nq->budget; 240 u32 sw_cons, raw_cons; 241 uintptr_t q_handle; 242 u16 type; 243 244 spin_lock_bh(&hwq->lock); 245 /* Service the NQ until empty */ 246 raw_cons = hwq->cons; 247 while (budget--) { 248 sw_cons = HWQ_CMP(raw_cons, hwq); 249 nq_ptr = (struct nq_base **)hwq->pbl_ptr; 250 nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]; 251 if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements)) 252 break; 253 254 /* 255 * The valid test of the entry must be done first before 256 * reading any further. 257 */ 258 dma_rmb(); 259 260 type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK; 261 switch (type) { 262 case NQ_BASE_TYPE_CQ_NOTIFICATION: 263 { 264 struct nq_cn *nqcne = (struct nq_cn *)nqe; 265 266 q_handle = le32_to_cpu(nqcne->cq_handle_low); 267 q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high) 268 << 32; 269 if ((unsigned long)cq == q_handle) { 270 nqcne->cq_handle_low = 0; 271 nqcne->cq_handle_high = 0; 272 cq->cnq_events++; 273 } 274 break; 275 } 276 default: 277 break; 278 } 279 raw_cons++; 280 } 281 spin_unlock_bh(&hwq->lock); 282 } 283 284 /* Wait for receiving all NQEs for this CQ and clean the NQEs associated with 285 * this CQ. 286 */ 287 static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events) 288 { 289 u32 retry_cnt = 100; 290 291 while (retry_cnt--) { 292 if (cnq_events == cq->cnq_events) 293 return; 294 usleep_range(50, 100); 295 clean_nq(cq->nq, cq); 296 } 297 } 298 299 static void bnxt_qplib_service_nq(struct tasklet_struct *t) 300 { 301 struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet); 302 struct bnxt_qplib_hwq *hwq = &nq->hwq; 303 struct bnxt_qplib_cq *cq; 304 int budget = nq->budget; 305 u32 sw_cons, raw_cons; 306 struct nq_base *nqe; 307 uintptr_t q_handle; 308 u16 type; 309 310 spin_lock_bh(&hwq->lock); 311 /* Service the NQ until empty */ 312 raw_cons = hwq->cons; 313 while (budget--) { 314 sw_cons = HWQ_CMP(raw_cons, hwq); 315 nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL); 316 if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements)) 317 break; 318 319 /* 320 * The valid test of the entry must be done first before 321 * reading any further. 322 */ 323 dma_rmb(); 324 325 type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK; 326 switch (type) { 327 case NQ_BASE_TYPE_CQ_NOTIFICATION: 328 { 329 struct nq_cn *nqcne = (struct nq_cn *)nqe; 330 331 q_handle = le32_to_cpu(nqcne->cq_handle_low); 332 q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high) 333 << 32; 334 cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle; 335 if (!cq) 336 break; 337 bnxt_qplib_armen_db(&cq->dbinfo, 338 DBC_DBC_TYPE_CQ_ARMENA); 339 spin_lock_bh(&cq->compl_lock); 340 atomic_set(&cq->arm_state, 0); 341 if (nq->cqn_handler(nq, (cq))) 342 dev_warn(&nq->pdev->dev, 343 "cqn - type 0x%x not handled\n", type); 344 cq->cnq_events++; 345 spin_unlock_bh(&cq->compl_lock); 346 break; 347 } 348 case NQ_BASE_TYPE_SRQ_EVENT: 349 { 350 struct bnxt_qplib_srq *srq; 351 struct nq_srq_event *nqsrqe = 352 (struct nq_srq_event *)nqe; 353 354 q_handle = le32_to_cpu(nqsrqe->srq_handle_low); 355 q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high) 356 << 32; 357 srq = (struct bnxt_qplib_srq *)q_handle; 358 bnxt_qplib_armen_db(&srq->dbinfo, 359 DBC_DBC_TYPE_SRQ_ARMENA); 360 if (nq->srqn_handler(nq, 361 (struct bnxt_qplib_srq *)q_handle, 362 nqsrqe->event)) 363 dev_warn(&nq->pdev->dev, 364 "SRQ event 0x%x not handled\n", 365 nqsrqe->event); 366 break; 367 } 368 case NQ_BASE_TYPE_DBQ_EVENT: 369 break; 370 default: 371 dev_warn(&nq->pdev->dev, 372 "nqe with type = 0x%x not handled\n", type); 373 break; 374 } 375 raw_cons++; 376 } 377 if (hwq->cons != raw_cons) { 378 hwq->cons = raw_cons; 379 bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true); 380 } 381 spin_unlock_bh(&hwq->lock); 382 } 383 384 /* bnxt_re_synchronize_nq - self polling notification queue. 385 * @nq - notification queue pointer 386 * 387 * This function will start polling entries of a given notification queue 388 * for all pending entries. 389 * This function is useful to synchronize notification entries while resources 390 * are going away. 391 */ 392 393 void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq) 394 { 395 int budget = nq->budget; 396 397 nq->budget = nq->hwq.max_elements; 398 bnxt_qplib_service_nq(&nq->nq_tasklet); 399 nq->budget = budget; 400 } 401 402 static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance) 403 { 404 struct bnxt_qplib_nq *nq = dev_instance; 405 struct bnxt_qplib_hwq *hwq = &nq->hwq; 406 u32 sw_cons; 407 408 /* Prefetch the NQ element */ 409 sw_cons = HWQ_CMP(hwq->cons, hwq); 410 prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL)); 411 412 /* Fan out to CPU affinitized kthreads? */ 413 tasklet_schedule(&nq->nq_tasklet); 414 415 return IRQ_HANDLED; 416 } 417 418 void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill) 419 { 420 if (!nq->requested) 421 return; 422 423 nq->requested = false; 424 /* Mask h/w interrupt */ 425 bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false); 426 /* Sync with last running IRQ handler */ 427 synchronize_irq(nq->msix_vec); 428 irq_set_affinity_hint(nq->msix_vec, NULL); 429 free_irq(nq->msix_vec, nq); 430 kfree(nq->name); 431 nq->name = NULL; 432 433 if (kill) 434 tasklet_kill(&nq->nq_tasklet); 435 tasklet_disable(&nq->nq_tasklet); 436 } 437 438 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq) 439 { 440 if (nq->cqn_wq) { 441 destroy_workqueue(nq->cqn_wq); 442 nq->cqn_wq = NULL; 443 } 444 445 /* Make sure the HW is stopped! */ 446 bnxt_qplib_nq_stop_irq(nq, true); 447 448 if (nq->nq_db.reg.bar_reg) { 449 iounmap(nq->nq_db.reg.bar_reg); 450 nq->nq_db.reg.bar_reg = NULL; 451 } 452 453 nq->cqn_handler = NULL; 454 nq->srqn_handler = NULL; 455 nq->msix_vec = 0; 456 } 457 458 int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx, 459 int msix_vector, bool need_init) 460 { 461 struct bnxt_qplib_res *res = nq->res; 462 int rc; 463 464 if (nq->requested) 465 return -EFAULT; 466 467 nq->msix_vec = msix_vector; 468 if (need_init) 469 tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq); 470 else 471 tasklet_enable(&nq->nq_tasklet); 472 473 nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s", 474 nq_indx, pci_name(res->pdev)); 475 if (!nq->name) 476 return -ENOMEM; 477 rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq); 478 if (rc) { 479 kfree(nq->name); 480 nq->name = NULL; 481 tasklet_disable(&nq->nq_tasklet); 482 return rc; 483 } 484 485 cpumask_clear(&nq->mask); 486 cpumask_set_cpu(nq_indx, &nq->mask); 487 rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask); 488 if (rc) { 489 dev_warn(&nq->pdev->dev, 490 "set affinity failed; vector: %d nq_idx: %d\n", 491 nq->msix_vec, nq_indx); 492 } 493 nq->requested = true; 494 bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true); 495 496 return rc; 497 } 498 499 static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt) 500 { 501 resource_size_t reg_base; 502 struct bnxt_qplib_nq_db *nq_db; 503 struct pci_dev *pdev; 504 505 pdev = nq->pdev; 506 nq_db = &nq->nq_db; 507 508 nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION; 509 nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id); 510 if (!nq_db->reg.bar_base) { 511 dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!", 512 nq_db->reg.bar_id); 513 return -ENOMEM; 514 } 515 516 reg_base = nq_db->reg.bar_base + reg_offt; 517 /* Unconditionally map 8 bytes to support 57500 series */ 518 nq_db->reg.len = 8; 519 nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len); 520 if (!nq_db->reg.bar_reg) { 521 dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed", 522 nq_db->reg.bar_id); 523 return -ENOMEM; 524 } 525 526 nq_db->dbinfo.db = nq_db->reg.bar_reg; 527 nq_db->dbinfo.hwq = &nq->hwq; 528 nq_db->dbinfo.xid = nq->ring_id; 529 530 return 0; 531 } 532 533 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, 534 int nq_idx, int msix_vector, int bar_reg_offset, 535 cqn_handler_t cqn_handler, 536 srqn_handler_t srqn_handler) 537 { 538 int rc = -1; 539 540 nq->pdev = pdev; 541 nq->cqn_handler = cqn_handler; 542 nq->srqn_handler = srqn_handler; 543 544 /* Have a task to schedule CQ notifiers in post send case */ 545 nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq"); 546 if (!nq->cqn_wq) 547 return -ENOMEM; 548 549 rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset); 550 if (rc) 551 goto fail; 552 553 rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true); 554 if (rc) { 555 dev_err(&nq->pdev->dev, 556 "Failed to request irq for nq-idx %d\n", nq_idx); 557 goto fail; 558 } 559 560 return 0; 561 fail: 562 bnxt_qplib_disable_nq(nq); 563 return rc; 564 } 565 566 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq) 567 { 568 if (nq->hwq.max_elements) { 569 bnxt_qplib_free_hwq(nq->res, &nq->hwq); 570 nq->hwq.max_elements = 0; 571 } 572 } 573 574 int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq) 575 { 576 struct bnxt_qplib_hwq_attr hwq_attr = {}; 577 struct bnxt_qplib_sg_info sginfo = {}; 578 579 nq->pdev = res->pdev; 580 nq->res = res; 581 if (!nq->hwq.max_elements || 582 nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT) 583 nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT; 584 585 sginfo.pgsize = PAGE_SIZE; 586 sginfo.pgshft = PAGE_SHIFT; 587 hwq_attr.res = res; 588 hwq_attr.sginfo = &sginfo; 589 hwq_attr.depth = nq->hwq.max_elements; 590 hwq_attr.stride = sizeof(struct nq_base); 591 hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res); 592 if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) { 593 dev_err(&nq->pdev->dev, "FP NQ allocation failed"); 594 return -ENOMEM; 595 } 596 nq->budget = 8; 597 return 0; 598 } 599 600 /* SRQ */ 601 void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res, 602 struct bnxt_qplib_srq *srq) 603 { 604 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 605 struct creq_destroy_srq_resp resp = {}; 606 struct bnxt_qplib_cmdqmsg msg = {}; 607 struct cmdq_destroy_srq req = {}; 608 int rc; 609 610 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, 611 CMDQ_BASE_OPCODE_DESTROY_SRQ, 612 sizeof(req)); 613 614 /* Configure the request */ 615 req.srq_cid = cpu_to_le32(srq->id); 616 617 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0); 618 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); 619 kfree(srq->swq); 620 if (rc) 621 return; 622 bnxt_qplib_free_hwq(res, &srq->hwq); 623 } 624 625 int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, 626 struct bnxt_qplib_srq *srq) 627 { 628 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 629 struct bnxt_qplib_hwq_attr hwq_attr = {}; 630 struct creq_create_srq_resp resp = {}; 631 struct bnxt_qplib_cmdqmsg msg = {}; 632 struct cmdq_create_srq req = {}; 633 struct bnxt_qplib_pbl *pbl; 634 u16 pg_sz_lvl; 635 int rc, idx; 636 637 hwq_attr.res = res; 638 hwq_attr.sginfo = &srq->sg_info; 639 hwq_attr.depth = srq->max_wqe; 640 hwq_attr.stride = srq->wqe_size; 641 hwq_attr.type = HWQ_TYPE_QUEUE; 642 rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr); 643 if (rc) 644 return rc; 645 646 srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq), 647 GFP_KERNEL); 648 if (!srq->swq) { 649 rc = -ENOMEM; 650 goto fail; 651 } 652 653 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, 654 CMDQ_BASE_OPCODE_CREATE_SRQ, 655 sizeof(req)); 656 657 /* Configure the request */ 658 req.dpi = cpu_to_le32(srq->dpi->dpi); 659 req.srq_handle = cpu_to_le64((uintptr_t)srq); 660 661 req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements); 662 pbl = &srq->hwq.pbl[PBL_LVL_0]; 663 pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) << 664 CMDQ_CREATE_SRQ_PG_SIZE_SFT); 665 pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) << 666 CMDQ_CREATE_SRQ_LVL_SFT; 667 req.pg_size_lvl = cpu_to_le16(pg_sz_lvl); 668 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]); 669 req.pd_id = cpu_to_le32(srq->pd->id); 670 req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id); 671 672 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0); 673 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); 674 if (rc) 675 goto fail; 676 677 spin_lock_init(&srq->lock); 678 srq->start_idx = 0; 679 srq->last_idx = srq->hwq.max_elements - 1; 680 for (idx = 0; idx < srq->hwq.max_elements; idx++) 681 srq->swq[idx].next_idx = idx + 1; 682 srq->swq[srq->last_idx].next_idx = -1; 683 684 srq->id = le32_to_cpu(resp.xid); 685 srq->dbinfo.hwq = &srq->hwq; 686 srq->dbinfo.xid = srq->id; 687 srq->dbinfo.db = srq->dpi->dbr; 688 srq->dbinfo.max_slot = 1; 689 srq->dbinfo.priv_db = res->dpi_tbl.priv_db; 690 if (srq->threshold) 691 bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA); 692 srq->arm_req = false; 693 694 return 0; 695 fail: 696 bnxt_qplib_free_hwq(res, &srq->hwq); 697 kfree(srq->swq); 698 699 return rc; 700 } 701 702 int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res, 703 struct bnxt_qplib_srq *srq) 704 { 705 struct bnxt_qplib_hwq *srq_hwq = &srq->hwq; 706 u32 sw_prod, sw_cons, count = 0; 707 708 sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq); 709 sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq); 710 711 count = sw_prod > sw_cons ? sw_prod - sw_cons : 712 srq_hwq->max_elements - sw_cons + sw_prod; 713 if (count > srq->threshold) { 714 srq->arm_req = false; 715 bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold); 716 } else { 717 /* Deferred arming */ 718 srq->arm_req = true; 719 } 720 721 return 0; 722 } 723 724 int bnxt_qplib_query_srq(struct bnxt_qplib_res *res, 725 struct bnxt_qplib_srq *srq) 726 { 727 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 728 struct creq_query_srq_resp resp = {}; 729 struct bnxt_qplib_cmdqmsg msg = {}; 730 struct bnxt_qplib_rcfw_sbuf *sbuf; 731 struct creq_query_srq_resp_sb *sb; 732 struct cmdq_query_srq req = {}; 733 int rc = 0; 734 735 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, 736 CMDQ_BASE_OPCODE_QUERY_SRQ, 737 sizeof(req)); 738 739 /* Configure the request */ 740 sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); 741 if (!sbuf) 742 return -ENOMEM; 743 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; 744 req.srq_cid = cpu_to_le32(srq->id); 745 sb = sbuf->sb; 746 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req), 747 sizeof(resp), 0); 748 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); 749 srq->threshold = le16_to_cpu(sb->srq_limit); 750 bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); 751 752 return rc; 753 } 754 755 int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq, 756 struct bnxt_qplib_swqe *wqe) 757 { 758 struct bnxt_qplib_hwq *srq_hwq = &srq->hwq; 759 struct rq_wqe *srqe; 760 struct sq_sge *hw_sge; 761 u32 sw_prod, sw_cons, count = 0; 762 int i, next; 763 764 spin_lock(&srq_hwq->lock); 765 if (srq->start_idx == srq->last_idx) { 766 dev_err(&srq_hwq->pdev->dev, 767 "FP: SRQ (0x%x) is full!\n", srq->id); 768 spin_unlock(&srq_hwq->lock); 769 return -EINVAL; 770 } 771 next = srq->start_idx; 772 srq->start_idx = srq->swq[next].next_idx; 773 spin_unlock(&srq_hwq->lock); 774 775 sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq); 776 srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL); 777 memset(srqe, 0, srq->wqe_size); 778 /* Calculate wqe_size16 and data_len */ 779 for (i = 0, hw_sge = (struct sq_sge *)srqe->data; 780 i < wqe->num_sge; i++, hw_sge++) { 781 hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr); 782 hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey); 783 hw_sge->size = cpu_to_le32(wqe->sg_list[i].size); 784 } 785 srqe->wqe_type = wqe->type; 786 srqe->flags = wqe->flags; 787 srqe->wqe_size = wqe->num_sge + 788 ((offsetof(typeof(*srqe), data) + 15) >> 4); 789 srqe->wr_id[0] = cpu_to_le32((u32)next); 790 srq->swq[next].wr_id = wqe->wr_id; 791 792 srq_hwq->prod++; 793 794 spin_lock(&srq_hwq->lock); 795 sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq); 796 /* retaining srq_hwq->cons for this logic 797 * actually the lock is only required to 798 * read srq_hwq->cons. 799 */ 800 sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq); 801 count = sw_prod > sw_cons ? sw_prod - sw_cons : 802 srq_hwq->max_elements - sw_cons + sw_prod; 803 spin_unlock(&srq_hwq->lock); 804 /* Ring DB */ 805 bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ); 806 if (srq->arm_req == true && count > srq->threshold) { 807 srq->arm_req = false; 808 bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold); 809 } 810 811 return 0; 812 } 813 814 /* QP */ 815 816 static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que) 817 { 818 int indx; 819 820 que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL); 821 if (!que->swq) 822 return -ENOMEM; 823 824 que->swq_start = 0; 825 que->swq_last = que->max_wqe - 1; 826 for (indx = 0; indx < que->max_wqe; indx++) 827 que->swq[indx].next_idx = indx + 1; 828 que->swq[que->swq_last].next_idx = 0; /* Make it circular */ 829 que->swq_last = 0; 830 831 return 0; 832 } 833 834 int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) 835 { 836 struct bnxt_qplib_hwq_attr hwq_attr = {}; 837 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 838 struct creq_create_qp1_resp resp = {}; 839 struct bnxt_qplib_cmdqmsg msg = {}; 840 struct bnxt_qplib_q *sq = &qp->sq; 841 struct bnxt_qplib_q *rq = &qp->rq; 842 struct cmdq_create_qp1 req = {}; 843 struct bnxt_qplib_pbl *pbl; 844 u32 qp_flags = 0; 845 u8 pg_sz_lvl; 846 u32 tbl_indx; 847 int rc; 848 849 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, 850 CMDQ_BASE_OPCODE_CREATE_QP1, 851 sizeof(req)); 852 /* General */ 853 req.type = qp->type; 854 req.dpi = cpu_to_le32(qp->dpi->dpi); 855 req.qp_handle = cpu_to_le64(qp->qp_handle); 856 857 /* SQ */ 858 hwq_attr.res = res; 859 hwq_attr.sginfo = &sq->sg_info; 860 hwq_attr.stride = sizeof(struct sq_sge); 861 hwq_attr.depth = bnxt_qplib_get_depth(sq); 862 hwq_attr.type = HWQ_TYPE_QUEUE; 863 rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr); 864 if (rc) 865 return rc; 866 867 rc = bnxt_qplib_alloc_init_swq(sq); 868 if (rc) 869 goto fail_sq; 870 871 req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)); 872 pbl = &sq->hwq.pbl[PBL_LVL_0]; 873 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]); 874 pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) << 875 CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT); 876 pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK); 877 req.sq_pg_size_sq_lvl = pg_sz_lvl; 878 req.sq_fwo_sq_sge = 879 cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) << 880 CMDQ_CREATE_QP1_SQ_SGE_SFT); 881 req.scq_cid = cpu_to_le32(qp->scq->id); 882 883 /* RQ */ 884 if (rq->max_wqe) { 885 hwq_attr.res = res; 886 hwq_attr.sginfo = &rq->sg_info; 887 hwq_attr.stride = sizeof(struct sq_sge); 888 hwq_attr.depth = bnxt_qplib_get_depth(rq); 889 hwq_attr.type = HWQ_TYPE_QUEUE; 890 rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr); 891 if (rc) 892 goto sq_swq; 893 rc = bnxt_qplib_alloc_init_swq(rq); 894 if (rc) 895 goto fail_rq; 896 req.rq_size = cpu_to_le32(rq->max_wqe); 897 pbl = &rq->hwq.pbl[PBL_LVL_0]; 898 req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]); 899 pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) << 900 CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT); 901 pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK); 902 req.rq_pg_size_rq_lvl = pg_sz_lvl; 903 req.rq_fwo_rq_sge = 904 cpu_to_le16((rq->max_sge & 905 CMDQ_CREATE_QP1_RQ_SGE_MASK) << 906 CMDQ_CREATE_QP1_RQ_SGE_SFT); 907 } 908 req.rcq_cid = cpu_to_le32(qp->rcq->id); 909 /* Header buffer - allow hdr_buf pass in */ 910 rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp); 911 if (rc) { 912 rc = -ENOMEM; 913 goto rq_rwq; 914 } 915 qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE; 916 req.qp_flags = cpu_to_le32(qp_flags); 917 req.pd_id = cpu_to_le32(qp->pd->id); 918 919 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0); 920 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); 921 if (rc) 922 goto fail; 923 924 qp->id = le32_to_cpu(resp.xid); 925 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; 926 qp->cctx = res->cctx; 927 sq->dbinfo.hwq = &sq->hwq; 928 sq->dbinfo.xid = qp->id; 929 sq->dbinfo.db = qp->dpi->dbr; 930 sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode); 931 if (rq->max_wqe) { 932 rq->dbinfo.hwq = &rq->hwq; 933 rq->dbinfo.xid = qp->id; 934 rq->dbinfo.db = qp->dpi->dbr; 935 rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size); 936 } 937 tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw); 938 rcfw->qp_tbl[tbl_indx].qp_id = qp->id; 939 rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp; 940 941 return 0; 942 943 fail: 944 bnxt_qplib_free_qp_hdr_buf(res, qp); 945 rq_rwq: 946 kfree(rq->swq); 947 fail_rq: 948 bnxt_qplib_free_hwq(res, &rq->hwq); 949 sq_swq: 950 kfree(sq->swq); 951 fail_sq: 952 bnxt_qplib_free_hwq(res, &sq->hwq); 953 return rc; 954 } 955 956 static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size) 957 { 958 struct bnxt_qplib_hwq *hwq; 959 struct bnxt_qplib_q *sq; 960 u64 fpsne, psn_pg; 961 u16 indx_pad = 0; 962 963 sq = &qp->sq; 964 hwq = &sq->hwq; 965 /* First psn entry */ 966 fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg); 967 if (!IS_ALIGNED(fpsne, PAGE_SIZE)) 968 indx_pad = (fpsne & ~PAGE_MASK) / size; 969 hwq->pad_pgofft = indx_pad; 970 hwq->pad_pg = (u64 *)psn_pg; 971 hwq->pad_stride = size; 972 } 973 974 int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) 975 { 976 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 977 struct bnxt_qplib_hwq_attr hwq_attr = {}; 978 struct bnxt_qplib_sg_info sginfo = {}; 979 struct creq_create_qp_resp resp = {}; 980 struct bnxt_qplib_cmdqmsg msg = {}; 981 struct bnxt_qplib_q *sq = &qp->sq; 982 struct bnxt_qplib_q *rq = &qp->rq; 983 struct cmdq_create_qp req = {}; 984 int rc, req_size, psn_sz = 0; 985 struct bnxt_qplib_hwq *xrrq; 986 struct bnxt_qplib_pbl *pbl; 987 u32 qp_flags = 0; 988 u8 pg_sz_lvl; 989 u32 tbl_indx; 990 u16 nsge; 991 992 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, 993 CMDQ_BASE_OPCODE_CREATE_QP, 994 sizeof(req)); 995 996 /* General */ 997 req.type = qp->type; 998 req.dpi = cpu_to_le32(qp->dpi->dpi); 999 req.qp_handle = cpu_to_le64(qp->qp_handle); 1000 1001 /* SQ */ 1002 if (qp->type == CMDQ_CREATE_QP_TYPE_RC) { 1003 psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ? 1004 sizeof(struct sq_psn_search_ext) : 1005 sizeof(struct sq_psn_search); 1006 } 1007 1008 hwq_attr.res = res; 1009 hwq_attr.sginfo = &sq->sg_info; 1010 hwq_attr.stride = sizeof(struct sq_sge); 1011 hwq_attr.depth = bnxt_qplib_get_depth(sq); 1012 hwq_attr.aux_stride = psn_sz; 1013 hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode); 1014 hwq_attr.type = HWQ_TYPE_QUEUE; 1015 rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr); 1016 if (rc) 1017 return rc; 1018 1019 rc = bnxt_qplib_alloc_init_swq(sq); 1020 if (rc) 1021 goto fail_sq; 1022 1023 if (psn_sz) 1024 bnxt_qplib_init_psn_ptr(qp, psn_sz); 1025 1026 req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)); 1027 pbl = &sq->hwq.pbl[PBL_LVL_0]; 1028 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]); 1029 pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) << 1030 CMDQ_CREATE_QP_SQ_PG_SIZE_SFT); 1031 pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK); 1032 req.sq_pg_size_sq_lvl = pg_sz_lvl; 1033 req.sq_fwo_sq_sge = 1034 cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) << 1035 CMDQ_CREATE_QP_SQ_SGE_SFT) | 0); 1036 req.scq_cid = cpu_to_le32(qp->scq->id); 1037 1038 /* RQ */ 1039 if (!qp->srq) { 1040 hwq_attr.res = res; 1041 hwq_attr.sginfo = &rq->sg_info; 1042 hwq_attr.stride = sizeof(struct sq_sge); 1043 hwq_attr.depth = bnxt_qplib_get_depth(rq); 1044 hwq_attr.aux_stride = 0; 1045 hwq_attr.aux_depth = 0; 1046 hwq_attr.type = HWQ_TYPE_QUEUE; 1047 rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr); 1048 if (rc) 1049 goto sq_swq; 1050 rc = bnxt_qplib_alloc_init_swq(rq); 1051 if (rc) 1052 goto fail_rq; 1053 1054 req.rq_size = cpu_to_le32(rq->max_wqe); 1055 pbl = &rq->hwq.pbl[PBL_LVL_0]; 1056 req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]); 1057 pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) << 1058 CMDQ_CREATE_QP_RQ_PG_SIZE_SFT); 1059 pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK); 1060 req.rq_pg_size_rq_lvl = pg_sz_lvl; 1061 nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ? 1062 6 : rq->max_sge; 1063 req.rq_fwo_rq_sge = 1064 cpu_to_le16(((nsge & 1065 CMDQ_CREATE_QP_RQ_SGE_MASK) << 1066 CMDQ_CREATE_QP_RQ_SGE_SFT) | 0); 1067 } else { 1068 /* SRQ */ 1069 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED; 1070 req.srq_cid = cpu_to_le32(qp->srq->id); 1071 } 1072 req.rcq_cid = cpu_to_le32(qp->rcq->id); 1073 1074 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE; 1075 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED; 1076 if (qp->sig_type) 1077 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION; 1078 if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) 1079 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED; 1080 if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf) 1081 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED; 1082 1083 req.qp_flags = cpu_to_le32(qp_flags); 1084 1085 /* ORRQ and IRRQ */ 1086 if (psn_sz) { 1087 xrrq = &qp->orrq; 1088 xrrq->max_elements = 1089 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic); 1090 req_size = xrrq->max_elements * 1091 BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1; 1092 req_size &= ~(PAGE_SIZE - 1); 1093 sginfo.pgsize = req_size; 1094 sginfo.pgshft = PAGE_SHIFT; 1095 1096 hwq_attr.res = res; 1097 hwq_attr.sginfo = &sginfo; 1098 hwq_attr.depth = xrrq->max_elements; 1099 hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE; 1100 hwq_attr.aux_stride = 0; 1101 hwq_attr.aux_depth = 0; 1102 hwq_attr.type = HWQ_TYPE_CTX; 1103 rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr); 1104 if (rc) 1105 goto rq_swq; 1106 pbl = &xrrq->pbl[PBL_LVL_0]; 1107 req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]); 1108 1109 xrrq = &qp->irrq; 1110 xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS( 1111 qp->max_dest_rd_atomic); 1112 req_size = xrrq->max_elements * 1113 BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1; 1114 req_size &= ~(PAGE_SIZE - 1); 1115 sginfo.pgsize = req_size; 1116 hwq_attr.depth = xrrq->max_elements; 1117 hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE; 1118 rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr); 1119 if (rc) 1120 goto fail_orrq; 1121 1122 pbl = &xrrq->pbl[PBL_LVL_0]; 1123 req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]); 1124 } 1125 req.pd_id = cpu_to_le32(qp->pd->id); 1126 1127 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), 1128 sizeof(resp), 0); 1129 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); 1130 if (rc) 1131 goto fail; 1132 1133 qp->id = le32_to_cpu(resp.xid); 1134 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; 1135 INIT_LIST_HEAD(&qp->sq_flush); 1136 INIT_LIST_HEAD(&qp->rq_flush); 1137 qp->cctx = res->cctx; 1138 sq->dbinfo.hwq = &sq->hwq; 1139 sq->dbinfo.xid = qp->id; 1140 sq->dbinfo.db = qp->dpi->dbr; 1141 sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode); 1142 if (rq->max_wqe) { 1143 rq->dbinfo.hwq = &rq->hwq; 1144 rq->dbinfo.xid = qp->id; 1145 rq->dbinfo.db = qp->dpi->dbr; 1146 rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size); 1147 } 1148 tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw); 1149 rcfw->qp_tbl[tbl_indx].qp_id = qp->id; 1150 rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp; 1151 1152 return 0; 1153 fail: 1154 bnxt_qplib_free_hwq(res, &qp->irrq); 1155 fail_orrq: 1156 bnxt_qplib_free_hwq(res, &qp->orrq); 1157 rq_swq: 1158 kfree(rq->swq); 1159 fail_rq: 1160 bnxt_qplib_free_hwq(res, &rq->hwq); 1161 sq_swq: 1162 kfree(sq->swq); 1163 fail_sq: 1164 bnxt_qplib_free_hwq(res, &sq->hwq); 1165 return rc; 1166 } 1167 1168 static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp) 1169 { 1170 switch (qp->state) { 1171 case CMDQ_MODIFY_QP_NEW_STATE_RTR: 1172 /* INIT->RTR, configure the path_mtu to the default 1173 * 2048 if not being requested 1174 */ 1175 if (!(qp->modify_flags & 1176 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) { 1177 qp->modify_flags |= 1178 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; 1179 qp->path_mtu = 1180 CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; 1181 } 1182 qp->modify_flags &= 1183 ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID; 1184 /* Bono FW require the max_dest_rd_atomic to be >= 1 */ 1185 if (qp->max_dest_rd_atomic < 1) 1186 qp->max_dest_rd_atomic = 1; 1187 qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC; 1188 /* Bono FW 20.6.5 requires SGID_INDEX configuration */ 1189 if (!(qp->modify_flags & 1190 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) { 1191 qp->modify_flags |= 1192 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX; 1193 qp->ah.sgid_index = 0; 1194 } 1195 break; 1196 default: 1197 break; 1198 } 1199 } 1200 1201 static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp) 1202 { 1203 switch (qp->state) { 1204 case CMDQ_MODIFY_QP_NEW_STATE_RTS: 1205 /* Bono FW requires the max_rd_atomic to be >= 1 */ 1206 if (qp->max_rd_atomic < 1) 1207 qp->max_rd_atomic = 1; 1208 /* Bono FW does not allow PKEY_INDEX, 1209 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT, 1210 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN, 1211 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID 1212 * modification 1213 */ 1214 qp->modify_flags &= 1215 ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY | 1216 CMDQ_MODIFY_QP_MODIFY_MASK_DGID | 1217 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL | 1218 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX | 1219 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT | 1220 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS | 1221 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC | 1222 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU | 1223 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN | 1224 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER | 1225 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC | 1226 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID); 1227 break; 1228 default: 1229 break; 1230 } 1231 } 1232 1233 static void __filter_modify_flags(struct bnxt_qplib_qp *qp) 1234 { 1235 switch (qp->cur_qp_state) { 1236 case CMDQ_MODIFY_QP_NEW_STATE_RESET: 1237 break; 1238 case CMDQ_MODIFY_QP_NEW_STATE_INIT: 1239 __modify_flags_from_init_state(qp); 1240 break; 1241 case CMDQ_MODIFY_QP_NEW_STATE_RTR: 1242 __modify_flags_from_rtr_state(qp); 1243 break; 1244 case CMDQ_MODIFY_QP_NEW_STATE_RTS: 1245 break; 1246 case CMDQ_MODIFY_QP_NEW_STATE_SQD: 1247 break; 1248 case CMDQ_MODIFY_QP_NEW_STATE_SQE: 1249 break; 1250 case CMDQ_MODIFY_QP_NEW_STATE_ERR: 1251 break; 1252 default: 1253 break; 1254 } 1255 } 1256 1257 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) 1258 { 1259 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 1260 struct creq_modify_qp_resp resp = {}; 1261 struct bnxt_qplib_cmdqmsg msg = {}; 1262 struct cmdq_modify_qp req = {}; 1263 u32 temp32[4]; 1264 u32 bmask; 1265 int rc; 1266 1267 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, 1268 CMDQ_BASE_OPCODE_MODIFY_QP, 1269 sizeof(req)); 1270 1271 /* Filter out the qp_attr_mask based on the state->new transition */ 1272 __filter_modify_flags(qp); 1273 bmask = qp->modify_flags; 1274 req.modify_mask = cpu_to_le32(qp->modify_flags); 1275 req.qp_cid = cpu_to_le32(qp->id); 1276 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) { 1277 req.network_type_en_sqd_async_notify_new_state = 1278 (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) | 1279 (qp->en_sqd_async_notify ? 1280 CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0); 1281 } 1282 req.network_type_en_sqd_async_notify_new_state |= qp->nw_type; 1283 1284 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS) 1285 req.access = qp->access; 1286 1287 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) 1288 req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL); 1289 1290 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY) 1291 req.qkey = cpu_to_le32(qp->qkey); 1292 1293 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) { 1294 memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid)); 1295 req.dgid[0] = cpu_to_le32(temp32[0]); 1296 req.dgid[1] = cpu_to_le32(temp32[1]); 1297 req.dgid[2] = cpu_to_le32(temp32[2]); 1298 req.dgid[3] = cpu_to_le32(temp32[3]); 1299 } 1300 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL) 1301 req.flow_label = cpu_to_le32(qp->ah.flow_label); 1302 1303 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX) 1304 req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id 1305 [qp->ah.sgid_index]); 1306 1307 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT) 1308 req.hop_limit = qp->ah.hop_limit; 1309 1310 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS) 1311 req.traffic_class = qp->ah.traffic_class; 1312 1313 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC) 1314 memcpy(req.dest_mac, qp->ah.dmac, 6); 1315 1316 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU) 1317 req.path_mtu_pingpong_push_enable |= qp->path_mtu; 1318 1319 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT) 1320 req.timeout = qp->timeout; 1321 1322 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT) 1323 req.retry_cnt = qp->retry_cnt; 1324 1325 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY) 1326 req.rnr_retry = qp->rnr_retry; 1327 1328 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER) 1329 req.min_rnr_timer = qp->min_rnr_timer; 1330 1331 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN) 1332 req.rq_psn = cpu_to_le32(qp->rq.psn); 1333 1334 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN) 1335 req.sq_psn = cpu_to_le32(qp->sq.psn); 1336 1337 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC) 1338 req.max_rd_atomic = 1339 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic); 1340 1341 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC) 1342 req.max_dest_rd_atomic = 1343 IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic); 1344 1345 req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements); 1346 req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements); 1347 req.sq_sge = cpu_to_le16(qp->sq.max_sge); 1348 req.rq_sge = cpu_to_le16(qp->rq.max_sge); 1349 req.max_inline_data = cpu_to_le32(qp->max_inline_data); 1350 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID) 1351 req.dest_qp_id = cpu_to_le32(qp->dest_qpn); 1352 1353 req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); 1354 1355 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0); 1356 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); 1357 if (rc) 1358 return rc; 1359 qp->cur_qp_state = qp->state; 1360 return 0; 1361 } 1362 1363 int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) 1364 { 1365 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 1366 struct creq_query_qp_resp resp = {}; 1367 struct bnxt_qplib_cmdqmsg msg = {}; 1368 struct bnxt_qplib_rcfw_sbuf *sbuf; 1369 struct creq_query_qp_resp_sb *sb; 1370 struct cmdq_query_qp req = {}; 1371 u32 temp32[4]; 1372 int i, rc = 0; 1373 1374 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, 1375 CMDQ_BASE_OPCODE_QUERY_QP, 1376 sizeof(req)); 1377 1378 sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); 1379 if (!sbuf) 1380 return -ENOMEM; 1381 sb = sbuf->sb; 1382 1383 req.qp_cid = cpu_to_le32(qp->id); 1384 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; 1385 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req), 1386 sizeof(resp), 0); 1387 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); 1388 if (rc) 1389 goto bail; 1390 /* Extract the context from the side buffer */ 1391 qp->state = sb->en_sqd_async_notify_state & 1392 CREQ_QUERY_QP_RESP_SB_STATE_MASK; 1393 qp->en_sqd_async_notify = sb->en_sqd_async_notify_state & 1394 CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ? 1395 true : false; 1396 qp->access = sb->access; 1397 qp->pkey_index = le16_to_cpu(sb->pkey); 1398 qp->qkey = le32_to_cpu(sb->qkey); 1399 1400 temp32[0] = le32_to_cpu(sb->dgid[0]); 1401 temp32[1] = le32_to_cpu(sb->dgid[1]); 1402 temp32[2] = le32_to_cpu(sb->dgid[2]); 1403 temp32[3] = le32_to_cpu(sb->dgid[3]); 1404 memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data)); 1405 1406 qp->ah.flow_label = le32_to_cpu(sb->flow_label); 1407 1408 qp->ah.sgid_index = 0; 1409 for (i = 0; i < res->sgid_tbl.max; i++) { 1410 if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) { 1411 qp->ah.sgid_index = i; 1412 break; 1413 } 1414 } 1415 if (i == res->sgid_tbl.max) 1416 dev_warn(&res->pdev->dev, "SGID not found??\n"); 1417 1418 qp->ah.hop_limit = sb->hop_limit; 1419 qp->ah.traffic_class = sb->traffic_class; 1420 memcpy(qp->ah.dmac, sb->dest_mac, 6); 1421 qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) & 1422 CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >> 1423 CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT; 1424 qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) & 1425 CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >> 1426 CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT; 1427 qp->timeout = sb->timeout; 1428 qp->retry_cnt = sb->retry_cnt; 1429 qp->rnr_retry = sb->rnr_retry; 1430 qp->min_rnr_timer = sb->min_rnr_timer; 1431 qp->rq.psn = le32_to_cpu(sb->rq_psn); 1432 qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic); 1433 qp->sq.psn = le32_to_cpu(sb->sq_psn); 1434 qp->max_dest_rd_atomic = 1435 IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic); 1436 qp->sq.max_wqe = qp->sq.hwq.max_elements; 1437 qp->rq.max_wqe = qp->rq.hwq.max_elements; 1438 qp->sq.max_sge = le16_to_cpu(sb->sq_sge); 1439 qp->rq.max_sge = le16_to_cpu(sb->rq_sge); 1440 qp->max_inline_data = le32_to_cpu(sb->max_inline_data); 1441 qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); 1442 memcpy(qp->smac, sb->src_mac, 6); 1443 qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); 1444 bail: 1445 bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); 1446 return rc; 1447 } 1448 1449 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) 1450 { 1451 struct bnxt_qplib_hwq *cq_hwq = &cq->hwq; 1452 struct cq_base *hw_cqe; 1453 int i; 1454 1455 for (i = 0; i < cq_hwq->max_elements; i++) { 1456 hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL); 1457 if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements)) 1458 continue; 1459 /* 1460 * The valid test of the entry must be done first before 1461 * reading any further. 1462 */ 1463 dma_rmb(); 1464 switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) { 1465 case CQ_BASE_CQE_TYPE_REQ: 1466 case CQ_BASE_CQE_TYPE_TERMINAL: 1467 { 1468 struct cq_req *cqe = (struct cq_req *)hw_cqe; 1469 1470 if (qp == le64_to_cpu(cqe->qp_handle)) 1471 cqe->qp_handle = 0; 1472 break; 1473 } 1474 case CQ_BASE_CQE_TYPE_RES_RC: 1475 case CQ_BASE_CQE_TYPE_RES_UD: 1476 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1: 1477 { 1478 struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe; 1479 1480 if (qp == le64_to_cpu(cqe->qp_handle)) 1481 cqe->qp_handle = 0; 1482 break; 1483 } 1484 default: 1485 break; 1486 } 1487 } 1488 } 1489 1490 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, 1491 struct bnxt_qplib_qp *qp) 1492 { 1493 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 1494 struct creq_destroy_qp_resp resp = {}; 1495 struct bnxt_qplib_cmdqmsg msg = {}; 1496 struct cmdq_destroy_qp req = {}; 1497 u32 tbl_indx; 1498 int rc; 1499 1500 tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw); 1501 rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID; 1502 rcfw->qp_tbl[tbl_indx].qp_handle = NULL; 1503 1504 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, 1505 CMDQ_BASE_OPCODE_DESTROY_QP, 1506 sizeof(req)); 1507 1508 req.qp_cid = cpu_to_le32(qp->id); 1509 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), 1510 sizeof(resp), 0); 1511 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); 1512 if (rc) { 1513 rcfw->qp_tbl[tbl_indx].qp_id = qp->id; 1514 rcfw->qp_tbl[tbl_indx].qp_handle = qp; 1515 return rc; 1516 } 1517 1518 return 0; 1519 } 1520 1521 void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res, 1522 struct bnxt_qplib_qp *qp) 1523 { 1524 bnxt_qplib_free_qp_hdr_buf(res, qp); 1525 bnxt_qplib_free_hwq(res, &qp->sq.hwq); 1526 kfree(qp->sq.swq); 1527 1528 bnxt_qplib_free_hwq(res, &qp->rq.hwq); 1529 kfree(qp->rq.swq); 1530 1531 if (qp->irrq.max_elements) 1532 bnxt_qplib_free_hwq(res, &qp->irrq); 1533 if (qp->orrq.max_elements) 1534 bnxt_qplib_free_hwq(res, &qp->orrq); 1535 1536 } 1537 1538 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, 1539 struct bnxt_qplib_sge *sge) 1540 { 1541 struct bnxt_qplib_q *sq = &qp->sq; 1542 u32 sw_prod; 1543 1544 memset(sge, 0, sizeof(*sge)); 1545 1546 if (qp->sq_hdr_buf) { 1547 sw_prod = sq->swq_start; 1548 sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map + 1549 sw_prod * qp->sq_hdr_buf_size); 1550 sge->lkey = 0xFFFFFFFF; 1551 sge->size = qp->sq_hdr_buf_size; 1552 return qp->sq_hdr_buf + sw_prod * sge->size; 1553 } 1554 return NULL; 1555 } 1556 1557 u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp) 1558 { 1559 struct bnxt_qplib_q *rq = &qp->rq; 1560 1561 return rq->swq_start; 1562 } 1563 1564 dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index) 1565 { 1566 return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size); 1567 } 1568 1569 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp, 1570 struct bnxt_qplib_sge *sge) 1571 { 1572 struct bnxt_qplib_q *rq = &qp->rq; 1573 u32 sw_prod; 1574 1575 memset(sge, 0, sizeof(*sge)); 1576 1577 if (qp->rq_hdr_buf) { 1578 sw_prod = rq->swq_start; 1579 sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map + 1580 sw_prod * qp->rq_hdr_buf_size); 1581 sge->lkey = 0xFFFFFFFF; 1582 sge->size = qp->rq_hdr_buf_size; 1583 return qp->rq_hdr_buf + sw_prod * sge->size; 1584 } 1585 return NULL; 1586 } 1587 1588 static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp, 1589 struct bnxt_qplib_swqe *wqe, 1590 struct bnxt_qplib_swq *swq) 1591 { 1592 struct sq_psn_search_ext *psns_ext; 1593 struct sq_psn_search *psns; 1594 u32 flg_npsn; 1595 u32 op_spsn; 1596 1597 if (!swq->psn_search) 1598 return; 1599 psns = swq->psn_search; 1600 psns_ext = swq->psn_ext; 1601 1602 op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) & 1603 SQ_PSN_SEARCH_START_PSN_MASK); 1604 op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) & 1605 SQ_PSN_SEARCH_OPCODE_MASK); 1606 flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) & 1607 SQ_PSN_SEARCH_NEXT_PSN_MASK); 1608 1609 if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) { 1610 psns_ext->opcode_start_psn = cpu_to_le32(op_spsn); 1611 psns_ext->flags_next_psn = cpu_to_le32(flg_npsn); 1612 psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx); 1613 } else { 1614 psns->opcode_start_psn = cpu_to_le32(op_spsn); 1615 psns->flags_next_psn = cpu_to_le32(flg_npsn); 1616 } 1617 } 1618 1619 static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp, 1620 struct bnxt_qplib_swqe *wqe, 1621 u16 *idx) 1622 { 1623 struct bnxt_qplib_hwq *hwq; 1624 int len, t_len, offt; 1625 bool pull_dst = true; 1626 void *il_dst = NULL; 1627 void *il_src = NULL; 1628 int t_cplen, cplen; 1629 int indx; 1630 1631 hwq = &qp->sq.hwq; 1632 t_len = 0; 1633 for (indx = 0; indx < wqe->num_sge; indx++) { 1634 len = wqe->sg_list[indx].size; 1635 il_src = (void *)wqe->sg_list[indx].addr; 1636 t_len += len; 1637 if (t_len > qp->max_inline_data) 1638 return -ENOMEM; 1639 while (len) { 1640 if (pull_dst) { 1641 pull_dst = false; 1642 il_dst = bnxt_qplib_get_prod_qe(hwq, *idx); 1643 (*idx)++; 1644 t_cplen = 0; 1645 offt = 0; 1646 } 1647 cplen = min_t(int, len, sizeof(struct sq_sge)); 1648 cplen = min_t(int, cplen, 1649 (sizeof(struct sq_sge) - offt)); 1650 memcpy(il_dst, il_src, cplen); 1651 t_cplen += cplen; 1652 il_src += cplen; 1653 il_dst += cplen; 1654 offt += cplen; 1655 len -= cplen; 1656 if (t_cplen == sizeof(struct sq_sge)) 1657 pull_dst = true; 1658 } 1659 } 1660 1661 return t_len; 1662 } 1663 1664 static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq, 1665 struct bnxt_qplib_sge *ssge, 1666 u16 nsge, u16 *idx) 1667 { 1668 struct sq_sge *dsge; 1669 int indx, len = 0; 1670 1671 for (indx = 0; indx < nsge; indx++, (*idx)++) { 1672 dsge = bnxt_qplib_get_prod_qe(hwq, *idx); 1673 dsge->va_or_pa = cpu_to_le64(ssge[indx].addr); 1674 dsge->l_key = cpu_to_le32(ssge[indx].lkey); 1675 dsge->size = cpu_to_le32(ssge[indx].size); 1676 len += ssge[indx].size; 1677 } 1678 1679 return len; 1680 } 1681 1682 static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp, 1683 struct bnxt_qplib_swqe *wqe, 1684 u16 *wqe_sz, u16 *qdf, u8 mode) 1685 { 1686 u32 ilsize, bytes; 1687 u16 nsge; 1688 u16 slot; 1689 1690 nsge = wqe->num_sge; 1691 /* Adding sq_send_hdr is a misnomer, for rq also hdr size is same. */ 1692 bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge); 1693 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) { 1694 ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data); 1695 bytes = ALIGN(ilsize, sizeof(struct sq_sge)); 1696 bytes += sizeof(struct sq_send_hdr); 1697 } 1698 1699 *qdf = __xlate_qfd(qp->sq.q_full_delta, bytes); 1700 slot = bytes >> 4; 1701 *wqe_sz = slot; 1702 if (mode == BNXT_QPLIB_WQE_MODE_STATIC) 1703 slot = 8; 1704 return slot; 1705 } 1706 1707 static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq, 1708 struct bnxt_qplib_swq *swq) 1709 { 1710 struct bnxt_qplib_hwq *hwq; 1711 u32 pg_num, pg_indx; 1712 void *buff; 1713 u32 tail; 1714 1715 hwq = &sq->hwq; 1716 if (!hwq->pad_pg) 1717 return; 1718 tail = swq->slot_idx / sq->dbinfo.max_slot; 1719 pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride); 1720 pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride); 1721 buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride); 1722 swq->psn_ext = buff; 1723 swq->psn_search = buff; 1724 } 1725 1726 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp) 1727 { 1728 struct bnxt_qplib_q *sq = &qp->sq; 1729 1730 bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ); 1731 } 1732 1733 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, 1734 struct bnxt_qplib_swqe *wqe) 1735 { 1736 struct bnxt_qplib_nq_work *nq_work = NULL; 1737 int i, rc = 0, data_len = 0, pkt_num = 0; 1738 struct bnxt_qplib_q *sq = &qp->sq; 1739 struct bnxt_qplib_hwq *hwq; 1740 struct bnxt_qplib_swq *swq; 1741 bool sch_handler = false; 1742 u16 wqe_sz, qdf = 0; 1743 void *base_hdr; 1744 void *ext_hdr; 1745 __le32 temp32; 1746 u32 wqe_idx; 1747 u32 slots; 1748 u16 idx; 1749 1750 hwq = &sq->hwq; 1751 if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS && 1752 qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) { 1753 dev_err(&hwq->pdev->dev, 1754 "QPLIB: FP: QP (0x%x) is in the 0x%x state", 1755 qp->id, qp->state); 1756 rc = -EINVAL; 1757 goto done; 1758 } 1759 1760 slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode); 1761 if (bnxt_qplib_queue_full(sq, slots + qdf)) { 1762 dev_err(&hwq->pdev->dev, 1763 "prod = %#x cons = %#x qdepth = %#x delta = %#x\n", 1764 hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta); 1765 rc = -ENOMEM; 1766 goto done; 1767 } 1768 1769 swq = bnxt_qplib_get_swqe(sq, &wqe_idx); 1770 bnxt_qplib_pull_psn_buff(sq, swq); 1771 1772 idx = 0; 1773 swq->slot_idx = hwq->prod; 1774 swq->slots = slots; 1775 swq->wr_id = wqe->wr_id; 1776 swq->type = wqe->type; 1777 swq->flags = wqe->flags; 1778 swq->start_psn = sq->psn & BTH_PSN_MASK; 1779 if (qp->sig_type) 1780 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP; 1781 1782 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { 1783 sch_handler = true; 1784 dev_dbg(&hwq->pdev->dev, 1785 "%s Error QP. Scheduling for poll_cq\n", __func__); 1786 goto queue_err; 1787 } 1788 1789 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++); 1790 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++); 1791 memset(base_hdr, 0, sizeof(struct sq_sge)); 1792 memset(ext_hdr, 0, sizeof(struct sq_sge)); 1793 1794 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) 1795 /* Copy the inline data */ 1796 data_len = bnxt_qplib_put_inline(qp, wqe, &idx); 1797 else 1798 data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, 1799 &idx); 1800 if (data_len < 0) 1801 goto queue_err; 1802 /* Specifics */ 1803 switch (wqe->type) { 1804 case BNXT_QPLIB_SWQE_TYPE_SEND: 1805 if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) { 1806 struct sq_send_raweth_qp1_hdr *sqe = base_hdr; 1807 struct sq_raw_ext_hdr *ext_sqe = ext_hdr; 1808 /* Assemble info for Raw Ethertype QPs */ 1809 1810 sqe->wqe_type = wqe->type; 1811 sqe->flags = wqe->flags; 1812 sqe->wqe_size = wqe_sz; 1813 sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action); 1814 sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags); 1815 sqe->length = cpu_to_le32(data_len); 1816 ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta & 1817 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) << 1818 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT); 1819 1820 break; 1821 } 1822 fallthrough; 1823 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM: 1824 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV: 1825 { 1826 struct sq_ud_ext_hdr *ext_sqe = ext_hdr; 1827 struct sq_send_hdr *sqe = base_hdr; 1828 1829 sqe->wqe_type = wqe->type; 1830 sqe->flags = wqe->flags; 1831 sqe->wqe_size = wqe_sz; 1832 sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key); 1833 if (qp->type == CMDQ_CREATE_QP_TYPE_UD || 1834 qp->type == CMDQ_CREATE_QP_TYPE_GSI) { 1835 sqe->q_key = cpu_to_le32(wqe->send.q_key); 1836 sqe->length = cpu_to_le32(data_len); 1837 sq->psn = (sq->psn + 1) & BTH_PSN_MASK; 1838 ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp & 1839 SQ_SEND_DST_QP_MASK); 1840 ext_sqe->avid = cpu_to_le32(wqe->send.avid & 1841 SQ_SEND_AVID_MASK); 1842 } else { 1843 sqe->length = cpu_to_le32(data_len); 1844 if (qp->mtu) 1845 pkt_num = (data_len + qp->mtu - 1) / qp->mtu; 1846 if (!pkt_num) 1847 pkt_num = 1; 1848 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK; 1849 } 1850 break; 1851 } 1852 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE: 1853 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM: 1854 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ: 1855 { 1856 struct sq_rdma_ext_hdr *ext_sqe = ext_hdr; 1857 struct sq_rdma_hdr *sqe = base_hdr; 1858 1859 sqe->wqe_type = wqe->type; 1860 sqe->flags = wqe->flags; 1861 sqe->wqe_size = wqe_sz; 1862 sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key); 1863 sqe->length = cpu_to_le32((u32)data_len); 1864 ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va); 1865 ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key); 1866 if (qp->mtu) 1867 pkt_num = (data_len + qp->mtu - 1) / qp->mtu; 1868 if (!pkt_num) 1869 pkt_num = 1; 1870 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK; 1871 break; 1872 } 1873 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP: 1874 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD: 1875 { 1876 struct sq_atomic_ext_hdr *ext_sqe = ext_hdr; 1877 struct sq_atomic_hdr *sqe = base_hdr; 1878 1879 sqe->wqe_type = wqe->type; 1880 sqe->flags = wqe->flags; 1881 sqe->remote_key = cpu_to_le32(wqe->atomic.r_key); 1882 sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va); 1883 ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data); 1884 ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data); 1885 if (qp->mtu) 1886 pkt_num = (data_len + qp->mtu - 1) / qp->mtu; 1887 if (!pkt_num) 1888 pkt_num = 1; 1889 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK; 1890 break; 1891 } 1892 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV: 1893 { 1894 struct sq_localinvalidate *sqe = base_hdr; 1895 1896 sqe->wqe_type = wqe->type; 1897 sqe->flags = wqe->flags; 1898 sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key); 1899 1900 break; 1901 } 1902 case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR: 1903 { 1904 struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr; 1905 struct sq_fr_pmr_hdr *sqe = base_hdr; 1906 1907 sqe->wqe_type = wqe->type; 1908 sqe->flags = wqe->flags; 1909 sqe->access_cntl = wqe->frmr.access_cntl | 1910 SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE; 1911 sqe->zero_based_page_size_log = 1912 (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) << 1913 SQ_FR_PMR_PAGE_SIZE_LOG_SFT | 1914 (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0); 1915 sqe->l_key = cpu_to_le32(wqe->frmr.l_key); 1916 temp32 = cpu_to_le32(wqe->frmr.length); 1917 memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length)); 1918 sqe->numlevels_pbl_page_size_log = 1919 ((wqe->frmr.pbl_pg_sz_log << 1920 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) & 1921 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) | 1922 ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) & 1923 SQ_FR_PMR_NUMLEVELS_MASK); 1924 1925 for (i = 0; i < wqe->frmr.page_list_len; i++) 1926 wqe->frmr.pbl_ptr[i] = cpu_to_le64( 1927 wqe->frmr.page_list[i] | 1928 PTU_PTE_VALID); 1929 ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr); 1930 ext_sqe->va = cpu_to_le64(wqe->frmr.va); 1931 1932 break; 1933 } 1934 case BNXT_QPLIB_SWQE_TYPE_BIND_MW: 1935 { 1936 struct sq_bind_ext_hdr *ext_sqe = ext_hdr; 1937 struct sq_bind_hdr *sqe = base_hdr; 1938 1939 sqe->wqe_type = wqe->type; 1940 sqe->flags = wqe->flags; 1941 sqe->access_cntl = wqe->bind.access_cntl; 1942 sqe->mw_type_zero_based = wqe->bind.mw_type | 1943 (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0); 1944 sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key); 1945 sqe->l_key = cpu_to_le32(wqe->bind.r_key); 1946 ext_sqe->va = cpu_to_le64(wqe->bind.va); 1947 ext_sqe->length_lo = cpu_to_le32(wqe->bind.length); 1948 break; 1949 } 1950 default: 1951 /* Bad wqe, return error */ 1952 rc = -EINVAL; 1953 goto done; 1954 } 1955 swq->next_psn = sq->psn & BTH_PSN_MASK; 1956 bnxt_qplib_fill_psn_search(qp, wqe, swq); 1957 queue_err: 1958 bnxt_qplib_swq_mod_start(sq, wqe_idx); 1959 bnxt_qplib_hwq_incr_prod(hwq, swq->slots); 1960 qp->wqe_cnt++; 1961 done: 1962 if (sch_handler) { 1963 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC); 1964 if (nq_work) { 1965 nq_work->cq = qp->scq; 1966 nq_work->nq = qp->scq->nq; 1967 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task); 1968 queue_work(qp->scq->nq->cqn_wq, &nq_work->work); 1969 } else { 1970 dev_err(&hwq->pdev->dev, 1971 "FP: Failed to allocate SQ nq_work!\n"); 1972 rc = -ENOMEM; 1973 } 1974 } 1975 return rc; 1976 } 1977 1978 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp) 1979 { 1980 struct bnxt_qplib_q *rq = &qp->rq; 1981 1982 bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ); 1983 } 1984 1985 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, 1986 struct bnxt_qplib_swqe *wqe) 1987 { 1988 struct bnxt_qplib_nq_work *nq_work = NULL; 1989 struct bnxt_qplib_q *rq = &qp->rq; 1990 struct rq_wqe_hdr *base_hdr; 1991 struct rq_ext_hdr *ext_hdr; 1992 struct bnxt_qplib_hwq *hwq; 1993 struct bnxt_qplib_swq *swq; 1994 bool sch_handler = false; 1995 u16 wqe_sz, idx; 1996 u32 wqe_idx; 1997 int rc = 0; 1998 1999 hwq = &rq->hwq; 2000 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { 2001 dev_err(&hwq->pdev->dev, 2002 "QPLIB: FP: QP (0x%x) is in the 0x%x state", 2003 qp->id, qp->state); 2004 rc = -EINVAL; 2005 goto done; 2006 } 2007 2008 if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) { 2009 dev_err(&hwq->pdev->dev, 2010 "FP: QP (0x%x) RQ is full!\n", qp->id); 2011 rc = -EINVAL; 2012 goto done; 2013 } 2014 2015 swq = bnxt_qplib_get_swqe(rq, &wqe_idx); 2016 swq->wr_id = wqe->wr_id; 2017 swq->slots = rq->dbinfo.max_slot; 2018 2019 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { 2020 sch_handler = true; 2021 dev_dbg(&hwq->pdev->dev, 2022 "%s: Error QP. Scheduling for poll_cq\n", __func__); 2023 goto queue_err; 2024 } 2025 2026 idx = 0; 2027 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++); 2028 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++); 2029 memset(base_hdr, 0, sizeof(struct sq_sge)); 2030 memset(ext_hdr, 0, sizeof(struct sq_sge)); 2031 wqe_sz = (sizeof(struct rq_wqe_hdr) + 2032 wqe->num_sge * sizeof(struct sq_sge)) >> 4; 2033 bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx); 2034 if (!wqe->num_sge) { 2035 struct sq_sge *sge; 2036 2037 sge = bnxt_qplib_get_prod_qe(hwq, idx++); 2038 sge->size = 0; 2039 wqe_sz++; 2040 } 2041 base_hdr->wqe_type = wqe->type; 2042 base_hdr->flags = wqe->flags; 2043 base_hdr->wqe_size = wqe_sz; 2044 base_hdr->wr_id[0] = cpu_to_le32(wqe_idx); 2045 queue_err: 2046 bnxt_qplib_swq_mod_start(rq, wqe_idx); 2047 bnxt_qplib_hwq_incr_prod(hwq, swq->slots); 2048 done: 2049 if (sch_handler) { 2050 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC); 2051 if (nq_work) { 2052 nq_work->cq = qp->rcq; 2053 nq_work->nq = qp->rcq->nq; 2054 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task); 2055 queue_work(qp->rcq->nq->cqn_wq, &nq_work->work); 2056 } else { 2057 dev_err(&hwq->pdev->dev, 2058 "FP: Failed to allocate RQ nq_work!\n"); 2059 rc = -ENOMEM; 2060 } 2061 } 2062 2063 return rc; 2064 } 2065 2066 /* CQ */ 2067 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) 2068 { 2069 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 2070 struct bnxt_qplib_hwq_attr hwq_attr = {}; 2071 struct creq_create_cq_resp resp = {}; 2072 struct bnxt_qplib_cmdqmsg msg = {}; 2073 struct cmdq_create_cq req = {}; 2074 struct bnxt_qplib_pbl *pbl; 2075 u32 pg_sz_lvl; 2076 int rc; 2077 2078 if (!cq->dpi) { 2079 dev_err(&rcfw->pdev->dev, 2080 "FP: CREATE_CQ failed due to NULL DPI\n"); 2081 return -EINVAL; 2082 } 2083 2084 hwq_attr.res = res; 2085 hwq_attr.depth = cq->max_wqe; 2086 hwq_attr.stride = sizeof(struct cq_base); 2087 hwq_attr.type = HWQ_TYPE_QUEUE; 2088 hwq_attr.sginfo = &cq->sg_info; 2089 rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr); 2090 if (rc) 2091 return rc; 2092 2093 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, 2094 CMDQ_BASE_OPCODE_CREATE_CQ, 2095 sizeof(req)); 2096 2097 req.dpi = cpu_to_le32(cq->dpi->dpi); 2098 req.cq_handle = cpu_to_le64(cq->cq_handle); 2099 req.cq_size = cpu_to_le32(cq->hwq.max_elements); 2100 pbl = &cq->hwq.pbl[PBL_LVL_0]; 2101 pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) << 2102 CMDQ_CREATE_CQ_PG_SIZE_SFT); 2103 pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK); 2104 req.pg_size_lvl = cpu_to_le32(pg_sz_lvl); 2105 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]); 2106 req.cq_fco_cnq_id = cpu_to_le32( 2107 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) << 2108 CMDQ_CREATE_CQ_CNQ_ID_SFT); 2109 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), 2110 sizeof(resp), 0); 2111 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); 2112 if (rc) 2113 goto fail; 2114 2115 cq->id = le32_to_cpu(resp.xid); 2116 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; 2117 init_waitqueue_head(&cq->waitq); 2118 INIT_LIST_HEAD(&cq->sqf_head); 2119 INIT_LIST_HEAD(&cq->rqf_head); 2120 spin_lock_init(&cq->compl_lock); 2121 spin_lock_init(&cq->flush_lock); 2122 2123 cq->dbinfo.hwq = &cq->hwq; 2124 cq->dbinfo.xid = cq->id; 2125 cq->dbinfo.db = cq->dpi->dbr; 2126 cq->dbinfo.priv_db = res->dpi_tbl.priv_db; 2127 2128 bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA); 2129 2130 return 0; 2131 2132 fail: 2133 bnxt_qplib_free_hwq(res, &cq->hwq); 2134 return rc; 2135 } 2136 2137 void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res, 2138 struct bnxt_qplib_cq *cq) 2139 { 2140 bnxt_qplib_free_hwq(res, &cq->hwq); 2141 memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq)); 2142 } 2143 2144 int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq, 2145 int new_cqes) 2146 { 2147 struct bnxt_qplib_hwq_attr hwq_attr = {}; 2148 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 2149 struct creq_resize_cq_resp resp = {}; 2150 struct bnxt_qplib_cmdqmsg msg = {}; 2151 struct cmdq_resize_cq req = {}; 2152 struct bnxt_qplib_pbl *pbl; 2153 u32 pg_sz, lvl, new_sz; 2154 int rc; 2155 2156 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, 2157 CMDQ_BASE_OPCODE_RESIZE_CQ, 2158 sizeof(req)); 2159 hwq_attr.sginfo = &cq->sg_info; 2160 hwq_attr.res = res; 2161 hwq_attr.depth = new_cqes; 2162 hwq_attr.stride = sizeof(struct cq_base); 2163 hwq_attr.type = HWQ_TYPE_QUEUE; 2164 rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr); 2165 if (rc) 2166 return rc; 2167 2168 req.cq_cid = cpu_to_le32(cq->id); 2169 pbl = &cq->resize_hwq.pbl[PBL_LVL_0]; 2170 pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq); 2171 lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) & 2172 CMDQ_RESIZE_CQ_LVL_MASK; 2173 new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) & 2174 CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK; 2175 req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl); 2176 req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]); 2177 2178 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), 2179 sizeof(resp), 0); 2180 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); 2181 return rc; 2182 } 2183 2184 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) 2185 { 2186 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 2187 struct creq_destroy_cq_resp resp = {}; 2188 struct bnxt_qplib_cmdqmsg msg = {}; 2189 struct cmdq_destroy_cq req = {}; 2190 u16 total_cnq_events; 2191 int rc; 2192 2193 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, 2194 CMDQ_BASE_OPCODE_DESTROY_CQ, 2195 sizeof(req)); 2196 2197 req.cq_cid = cpu_to_le32(cq->id); 2198 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), 2199 sizeof(resp), 0); 2200 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); 2201 if (rc) 2202 return rc; 2203 total_cnq_events = le16_to_cpu(resp.total_cnq_events); 2204 __wait_for_all_nqes(cq, total_cnq_events); 2205 bnxt_qplib_free_hwq(res, &cq->hwq); 2206 return 0; 2207 } 2208 2209 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp, 2210 struct bnxt_qplib_cqe **pcqe, int *budget) 2211 { 2212 struct bnxt_qplib_cqe *cqe; 2213 u32 start, last; 2214 int rc = 0; 2215 2216 /* Now complete all outstanding SQEs with FLUSHED_ERR */ 2217 start = sq->swq_start; 2218 cqe = *pcqe; 2219 while (*budget) { 2220 last = sq->swq_last; 2221 if (start == last) 2222 break; 2223 /* Skip the FENCE WQE completions */ 2224 if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) { 2225 bnxt_qplib_cancel_phantom_processing(qp); 2226 goto skip_compl; 2227 } 2228 memset(cqe, 0, sizeof(*cqe)); 2229 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR; 2230 cqe->opcode = CQ_BASE_CQE_TYPE_REQ; 2231 cqe->qp_handle = (u64)(unsigned long)qp; 2232 cqe->wr_id = sq->swq[last].wr_id; 2233 cqe->src_qp = qp->id; 2234 cqe->type = sq->swq[last].type; 2235 cqe++; 2236 (*budget)--; 2237 skip_compl: 2238 bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots); 2239 sq->swq_last = sq->swq[last].next_idx; 2240 } 2241 *pcqe = cqe; 2242 if (!(*budget) && sq->swq_last != start) 2243 /* Out of budget */ 2244 rc = -EAGAIN; 2245 2246 return rc; 2247 } 2248 2249 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp, 2250 struct bnxt_qplib_cqe **pcqe, int *budget) 2251 { 2252 struct bnxt_qplib_cqe *cqe; 2253 u32 start, last; 2254 int opcode = 0; 2255 int rc = 0; 2256 2257 switch (qp->type) { 2258 case CMDQ_CREATE_QP1_TYPE_GSI: 2259 opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1; 2260 break; 2261 case CMDQ_CREATE_QP_TYPE_RC: 2262 opcode = CQ_BASE_CQE_TYPE_RES_RC; 2263 break; 2264 case CMDQ_CREATE_QP_TYPE_UD: 2265 case CMDQ_CREATE_QP_TYPE_GSI: 2266 opcode = CQ_BASE_CQE_TYPE_RES_UD; 2267 break; 2268 } 2269 2270 /* Flush the rest of the RQ */ 2271 start = rq->swq_start; 2272 cqe = *pcqe; 2273 while (*budget) { 2274 last = rq->swq_last; 2275 if (last == start) 2276 break; 2277 memset(cqe, 0, sizeof(*cqe)); 2278 cqe->status = 2279 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR; 2280 cqe->opcode = opcode; 2281 cqe->qp_handle = (unsigned long)qp; 2282 cqe->wr_id = rq->swq[last].wr_id; 2283 cqe++; 2284 (*budget)--; 2285 bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots); 2286 rq->swq_last = rq->swq[last].next_idx; 2287 } 2288 *pcqe = cqe; 2289 if (!*budget && rq->swq_last != start) 2290 /* Out of budget */ 2291 rc = -EAGAIN; 2292 2293 return rc; 2294 } 2295 2296 void bnxt_qplib_mark_qp_error(void *qp_handle) 2297 { 2298 struct bnxt_qplib_qp *qp = qp_handle; 2299 2300 if (!qp) 2301 return; 2302 2303 /* Must block new posting of SQ and RQ */ 2304 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 2305 bnxt_qplib_cancel_phantom_processing(qp); 2306 } 2307 2308 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) 2309 * CQE is track from sw_cq_cons to max_element but valid only if VALID=1 2310 */ 2311 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq, 2312 u32 cq_cons, u32 swq_last, u32 cqe_sq_cons) 2313 { 2314 u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx; 2315 struct bnxt_qplib_q *sq = &qp->sq; 2316 struct cq_req *peek_req_hwcqe; 2317 struct bnxt_qplib_qp *peek_qp; 2318 struct bnxt_qplib_q *peek_sq; 2319 struct bnxt_qplib_swq *swq; 2320 struct cq_base *peek_hwcqe; 2321 int i, rc = 0; 2322 2323 /* Normal mode */ 2324 /* Check for the psn_search marking before completing */ 2325 swq = &sq->swq[swq_last]; 2326 if (swq->psn_search && 2327 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) { 2328 /* Unmark */ 2329 swq->psn_search->flags_next_psn = cpu_to_le32 2330 (le32_to_cpu(swq->psn_search->flags_next_psn) 2331 & ~0x80000000); 2332 dev_dbg(&cq->hwq.pdev->dev, 2333 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n", 2334 cq_cons, qp->id, swq_last, cqe_sq_cons); 2335 sq->condition = true; 2336 sq->send_phantom = true; 2337 2338 /* TODO: Only ARM if the previous SQE is ARMALL */ 2339 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL); 2340 rc = -EAGAIN; 2341 goto out; 2342 } 2343 if (sq->condition) { 2344 /* Peek at the completions */ 2345 peek_raw_cq_cons = cq->hwq.cons; 2346 peek_sw_cq_cons = cq_cons; 2347 i = cq->hwq.max_elements; 2348 while (i--) { 2349 peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq); 2350 peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq, 2351 peek_sw_cq_cons, NULL); 2352 /* If the next hwcqe is VALID */ 2353 if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons, 2354 cq->hwq.max_elements)) { 2355 /* 2356 * The valid test of the entry must be done first before 2357 * reading any further. 2358 */ 2359 dma_rmb(); 2360 /* If the next hwcqe is a REQ */ 2361 if ((peek_hwcqe->cqe_type_toggle & 2362 CQ_BASE_CQE_TYPE_MASK) == 2363 CQ_BASE_CQE_TYPE_REQ) { 2364 peek_req_hwcqe = (struct cq_req *) 2365 peek_hwcqe; 2366 peek_qp = (struct bnxt_qplib_qp *) 2367 ((unsigned long) 2368 le64_to_cpu 2369 (peek_req_hwcqe->qp_handle)); 2370 peek_sq = &peek_qp->sq; 2371 peek_sq_cons_idx = 2372 ((le16_to_cpu( 2373 peek_req_hwcqe->sq_cons_idx) 2374 - 1) % sq->max_wqe); 2375 /* If the hwcqe's sq's wr_id matches */ 2376 if (peek_sq == sq && 2377 sq->swq[peek_sq_cons_idx].wr_id == 2378 BNXT_QPLIB_FENCE_WRID) { 2379 /* 2380 * Unbreak only if the phantom 2381 * comes back 2382 */ 2383 dev_dbg(&cq->hwq.pdev->dev, 2384 "FP: Got Phantom CQE\n"); 2385 sq->condition = false; 2386 sq->single = true; 2387 rc = 0; 2388 goto out; 2389 } 2390 } 2391 /* Valid but not the phantom, so keep looping */ 2392 } else { 2393 /* Not valid yet, just exit and wait */ 2394 rc = -EINVAL; 2395 goto out; 2396 } 2397 peek_sw_cq_cons++; 2398 peek_raw_cq_cons++; 2399 } 2400 dev_err(&cq->hwq.pdev->dev, 2401 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n", 2402 cq_cons, qp->id, swq_last, cqe_sq_cons); 2403 rc = -EINVAL; 2404 } 2405 out: 2406 return rc; 2407 } 2408 2409 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, 2410 struct cq_req *hwcqe, 2411 struct bnxt_qplib_cqe **pcqe, int *budget, 2412 u32 cq_cons, struct bnxt_qplib_qp **lib_qp) 2413 { 2414 struct bnxt_qplib_swq *swq; 2415 struct bnxt_qplib_cqe *cqe; 2416 struct bnxt_qplib_qp *qp; 2417 struct bnxt_qplib_q *sq; 2418 u32 cqe_sq_cons; 2419 int rc = 0; 2420 2421 qp = (struct bnxt_qplib_qp *)((unsigned long) 2422 le64_to_cpu(hwcqe->qp_handle)); 2423 if (!qp) { 2424 dev_err(&cq->hwq.pdev->dev, 2425 "FP: Process Req qp is NULL\n"); 2426 return -EINVAL; 2427 } 2428 sq = &qp->sq; 2429 2430 cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe; 2431 if (qp->sq.flushed) { 2432 dev_dbg(&cq->hwq.pdev->dev, 2433 "%s: QP in Flush QP = %p\n", __func__, qp); 2434 goto done; 2435 } 2436 /* Require to walk the sq's swq to fabricate CQEs for all previously 2437 * signaled SWQEs due to CQE aggregation from the current sq cons 2438 * to the cqe_sq_cons 2439 */ 2440 cqe = *pcqe; 2441 while (*budget) { 2442 if (sq->swq_last == cqe_sq_cons) 2443 /* Done */ 2444 break; 2445 2446 swq = &sq->swq[sq->swq_last]; 2447 memset(cqe, 0, sizeof(*cqe)); 2448 cqe->opcode = CQ_BASE_CQE_TYPE_REQ; 2449 cqe->qp_handle = (u64)(unsigned long)qp; 2450 cqe->src_qp = qp->id; 2451 cqe->wr_id = swq->wr_id; 2452 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID) 2453 goto skip; 2454 cqe->type = swq->type; 2455 2456 /* For the last CQE, check for status. For errors, regardless 2457 * of the request being signaled or not, it must complete with 2458 * the hwcqe error status 2459 */ 2460 if (swq->next_idx == cqe_sq_cons && 2461 hwcqe->status != CQ_REQ_STATUS_OK) { 2462 cqe->status = hwcqe->status; 2463 dev_err(&cq->hwq.pdev->dev, 2464 "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n", 2465 sq->swq_last, cqe->wr_id, cqe->status); 2466 cqe++; 2467 (*budget)--; 2468 bnxt_qplib_mark_qp_error(qp); 2469 /* Add qp to flush list of the CQ */ 2470 bnxt_qplib_add_flush_qp(qp); 2471 } else { 2472 /* Before we complete, do WA 9060 */ 2473 if (do_wa9060(qp, cq, cq_cons, sq->swq_last, 2474 cqe_sq_cons)) { 2475 *lib_qp = qp; 2476 goto out; 2477 } 2478 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { 2479 cqe->status = CQ_REQ_STATUS_OK; 2480 cqe++; 2481 (*budget)--; 2482 } 2483 } 2484 skip: 2485 bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots); 2486 sq->swq_last = swq->next_idx; 2487 if (sq->single) 2488 break; 2489 } 2490 out: 2491 *pcqe = cqe; 2492 if (sq->swq_last != cqe_sq_cons) { 2493 /* Out of budget */ 2494 rc = -EAGAIN; 2495 goto done; 2496 } 2497 /* 2498 * Back to normal completion mode only after it has completed all of 2499 * the WC for this CQE 2500 */ 2501 sq->single = false; 2502 done: 2503 return rc; 2504 } 2505 2506 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag) 2507 { 2508 spin_lock(&srq->hwq.lock); 2509 srq->swq[srq->last_idx].next_idx = (int)tag; 2510 srq->last_idx = (int)tag; 2511 srq->swq[srq->last_idx].next_idx = -1; 2512 srq->hwq.cons++; /* Support for SRQE counter */ 2513 spin_unlock(&srq->hwq.lock); 2514 } 2515 2516 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq, 2517 struct cq_res_rc *hwcqe, 2518 struct bnxt_qplib_cqe **pcqe, 2519 int *budget) 2520 { 2521 struct bnxt_qplib_srq *srq; 2522 struct bnxt_qplib_cqe *cqe; 2523 struct bnxt_qplib_qp *qp; 2524 struct bnxt_qplib_q *rq; 2525 u32 wr_id_idx; 2526 2527 qp = (struct bnxt_qplib_qp *)((unsigned long) 2528 le64_to_cpu(hwcqe->qp_handle)); 2529 if (!qp) { 2530 dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n"); 2531 return -EINVAL; 2532 } 2533 if (qp->rq.flushed) { 2534 dev_dbg(&cq->hwq.pdev->dev, 2535 "%s: QP in Flush QP = %p\n", __func__, qp); 2536 return 0; 2537 } 2538 2539 cqe = *pcqe; 2540 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK; 2541 cqe->length = le32_to_cpu(hwcqe->length); 2542 cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key); 2543 cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle); 2544 cqe->flags = le16_to_cpu(hwcqe->flags); 2545 cqe->status = hwcqe->status; 2546 cqe->qp_handle = (u64)(unsigned long)qp; 2547 2548 wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) & 2549 CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK; 2550 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) { 2551 srq = qp->srq; 2552 if (!srq) 2553 return -EINVAL; 2554 if (wr_id_idx >= srq->hwq.max_elements) { 2555 dev_err(&cq->hwq.pdev->dev, 2556 "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n", 2557 wr_id_idx, srq->hwq.max_elements); 2558 return -EINVAL; 2559 } 2560 cqe->wr_id = srq->swq[wr_id_idx].wr_id; 2561 bnxt_qplib_release_srqe(srq, wr_id_idx); 2562 cqe++; 2563 (*budget)--; 2564 *pcqe = cqe; 2565 } else { 2566 struct bnxt_qplib_swq *swq; 2567 2568 rq = &qp->rq; 2569 if (wr_id_idx > (rq->max_wqe - 1)) { 2570 dev_err(&cq->hwq.pdev->dev, 2571 "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n", 2572 wr_id_idx, rq->max_wqe); 2573 return -EINVAL; 2574 } 2575 if (wr_id_idx != rq->swq_last) 2576 return -EINVAL; 2577 swq = &rq->swq[rq->swq_last]; 2578 cqe->wr_id = swq->wr_id; 2579 cqe++; 2580 (*budget)--; 2581 bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots); 2582 rq->swq_last = swq->next_idx; 2583 *pcqe = cqe; 2584 2585 if (hwcqe->status != CQ_RES_RC_STATUS_OK) { 2586 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 2587 /* Add qp to flush list of the CQ */ 2588 bnxt_qplib_add_flush_qp(qp); 2589 } 2590 } 2591 2592 return 0; 2593 } 2594 2595 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq, 2596 struct cq_res_ud *hwcqe, 2597 struct bnxt_qplib_cqe **pcqe, 2598 int *budget) 2599 { 2600 struct bnxt_qplib_srq *srq; 2601 struct bnxt_qplib_cqe *cqe; 2602 struct bnxt_qplib_qp *qp; 2603 struct bnxt_qplib_q *rq; 2604 u32 wr_id_idx; 2605 2606 qp = (struct bnxt_qplib_qp *)((unsigned long) 2607 le64_to_cpu(hwcqe->qp_handle)); 2608 if (!qp) { 2609 dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n"); 2610 return -EINVAL; 2611 } 2612 if (qp->rq.flushed) { 2613 dev_dbg(&cq->hwq.pdev->dev, 2614 "%s: QP in Flush QP = %p\n", __func__, qp); 2615 return 0; 2616 } 2617 cqe = *pcqe; 2618 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK; 2619 cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK; 2620 cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata); 2621 cqe->invrkey = le32_to_cpu(hwcqe->imm_data); 2622 cqe->flags = le16_to_cpu(hwcqe->flags); 2623 cqe->status = hwcqe->status; 2624 cqe->qp_handle = (u64)(unsigned long)qp; 2625 /*FIXME: Endianness fix needed for smace */ 2626 memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN); 2627 wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id) 2628 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK; 2629 cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) | 2630 ((le32_to_cpu( 2631 hwcqe->src_qp_high_srq_or_rq_wr_id) & 2632 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8); 2633 2634 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) { 2635 srq = qp->srq; 2636 if (!srq) 2637 return -EINVAL; 2638 2639 if (wr_id_idx >= srq->hwq.max_elements) { 2640 dev_err(&cq->hwq.pdev->dev, 2641 "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n", 2642 wr_id_idx, srq->hwq.max_elements); 2643 return -EINVAL; 2644 } 2645 cqe->wr_id = srq->swq[wr_id_idx].wr_id; 2646 bnxt_qplib_release_srqe(srq, wr_id_idx); 2647 cqe++; 2648 (*budget)--; 2649 *pcqe = cqe; 2650 } else { 2651 struct bnxt_qplib_swq *swq; 2652 2653 rq = &qp->rq; 2654 if (wr_id_idx > (rq->max_wqe - 1)) { 2655 dev_err(&cq->hwq.pdev->dev, 2656 "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n", 2657 wr_id_idx, rq->max_wqe); 2658 return -EINVAL; 2659 } 2660 2661 if (rq->swq_last != wr_id_idx) 2662 return -EINVAL; 2663 swq = &rq->swq[rq->swq_last]; 2664 cqe->wr_id = swq->wr_id; 2665 cqe++; 2666 (*budget)--; 2667 bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots); 2668 rq->swq_last = swq->next_idx; 2669 *pcqe = cqe; 2670 2671 if (hwcqe->status != CQ_RES_RC_STATUS_OK) { 2672 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 2673 /* Add qp to flush list of the CQ */ 2674 bnxt_qplib_add_flush_qp(qp); 2675 } 2676 } 2677 2678 return 0; 2679 } 2680 2681 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) 2682 { 2683 struct cq_base *hw_cqe; 2684 u32 sw_cons, raw_cons; 2685 bool rc = true; 2686 2687 raw_cons = cq->hwq.cons; 2688 sw_cons = HWQ_CMP(raw_cons, &cq->hwq); 2689 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL); 2690 /* Check for Valid bit. If the CQE is valid, return false */ 2691 rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements); 2692 return rc; 2693 } 2694 2695 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq, 2696 struct cq_res_raweth_qp1 *hwcqe, 2697 struct bnxt_qplib_cqe **pcqe, 2698 int *budget) 2699 { 2700 struct bnxt_qplib_qp *qp; 2701 struct bnxt_qplib_q *rq; 2702 struct bnxt_qplib_srq *srq; 2703 struct bnxt_qplib_cqe *cqe; 2704 u32 wr_id_idx; 2705 2706 qp = (struct bnxt_qplib_qp *)((unsigned long) 2707 le64_to_cpu(hwcqe->qp_handle)); 2708 if (!qp) { 2709 dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n"); 2710 return -EINVAL; 2711 } 2712 if (qp->rq.flushed) { 2713 dev_dbg(&cq->hwq.pdev->dev, 2714 "%s: QP in Flush QP = %p\n", __func__, qp); 2715 return 0; 2716 } 2717 cqe = *pcqe; 2718 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK; 2719 cqe->flags = le16_to_cpu(hwcqe->flags); 2720 cqe->qp_handle = (u64)(unsigned long)qp; 2721 2722 wr_id_idx = 2723 le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id) 2724 & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK; 2725 cqe->src_qp = qp->id; 2726 if (qp->id == 1 && !cqe->length) { 2727 /* Add workaround for the length misdetection */ 2728 cqe->length = 296; 2729 } else { 2730 cqe->length = le16_to_cpu(hwcqe->length); 2731 } 2732 cqe->pkey_index = qp->pkey_index; 2733 memcpy(cqe->smac, qp->smac, 6); 2734 2735 cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags); 2736 cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2); 2737 cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata); 2738 2739 if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) { 2740 srq = qp->srq; 2741 if (!srq) { 2742 dev_err(&cq->hwq.pdev->dev, 2743 "FP: SRQ used but not defined??\n"); 2744 return -EINVAL; 2745 } 2746 if (wr_id_idx >= srq->hwq.max_elements) { 2747 dev_err(&cq->hwq.pdev->dev, 2748 "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n", 2749 wr_id_idx, srq->hwq.max_elements); 2750 return -EINVAL; 2751 } 2752 cqe->wr_id = srq->swq[wr_id_idx].wr_id; 2753 bnxt_qplib_release_srqe(srq, wr_id_idx); 2754 cqe++; 2755 (*budget)--; 2756 *pcqe = cqe; 2757 } else { 2758 struct bnxt_qplib_swq *swq; 2759 2760 rq = &qp->rq; 2761 if (wr_id_idx > (rq->max_wqe - 1)) { 2762 dev_err(&cq->hwq.pdev->dev, 2763 "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n", 2764 wr_id_idx, rq->max_wqe); 2765 return -EINVAL; 2766 } 2767 if (rq->swq_last != wr_id_idx) 2768 return -EINVAL; 2769 swq = &rq->swq[rq->swq_last]; 2770 cqe->wr_id = swq->wr_id; 2771 cqe++; 2772 (*budget)--; 2773 bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots); 2774 rq->swq_last = swq->next_idx; 2775 *pcqe = cqe; 2776 2777 if (hwcqe->status != CQ_RES_RC_STATUS_OK) { 2778 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 2779 /* Add qp to flush list of the CQ */ 2780 bnxt_qplib_add_flush_qp(qp); 2781 } 2782 } 2783 2784 return 0; 2785 } 2786 2787 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq, 2788 struct cq_terminal *hwcqe, 2789 struct bnxt_qplib_cqe **pcqe, 2790 int *budget) 2791 { 2792 struct bnxt_qplib_qp *qp; 2793 struct bnxt_qplib_q *sq, *rq; 2794 struct bnxt_qplib_cqe *cqe; 2795 u32 swq_last = 0, cqe_cons; 2796 int rc = 0; 2797 2798 /* Check the Status */ 2799 if (hwcqe->status != CQ_TERMINAL_STATUS_OK) 2800 dev_warn(&cq->hwq.pdev->dev, 2801 "FP: CQ Process Terminal Error status = 0x%x\n", 2802 hwcqe->status); 2803 2804 qp = (struct bnxt_qplib_qp *)((unsigned long) 2805 le64_to_cpu(hwcqe->qp_handle)); 2806 if (!qp) 2807 return -EINVAL; 2808 2809 /* Must block new posting of SQ and RQ */ 2810 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 2811 2812 sq = &qp->sq; 2813 rq = &qp->rq; 2814 2815 cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx); 2816 if (cqe_cons == 0xFFFF) 2817 goto do_rq; 2818 cqe_cons %= sq->max_wqe; 2819 2820 if (qp->sq.flushed) { 2821 dev_dbg(&cq->hwq.pdev->dev, 2822 "%s: QP in Flush QP = %p\n", __func__, qp); 2823 goto sq_done; 2824 } 2825 2826 /* Terminal CQE can also include aggregated successful CQEs prior. 2827 * So we must complete all CQEs from the current sq's cons to the 2828 * cq_cons with status OK 2829 */ 2830 cqe = *pcqe; 2831 while (*budget) { 2832 swq_last = sq->swq_last; 2833 if (swq_last == cqe_cons) 2834 break; 2835 if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) { 2836 memset(cqe, 0, sizeof(*cqe)); 2837 cqe->status = CQ_REQ_STATUS_OK; 2838 cqe->opcode = CQ_BASE_CQE_TYPE_REQ; 2839 cqe->qp_handle = (u64)(unsigned long)qp; 2840 cqe->src_qp = qp->id; 2841 cqe->wr_id = sq->swq[swq_last].wr_id; 2842 cqe->type = sq->swq[swq_last].type; 2843 cqe++; 2844 (*budget)--; 2845 } 2846 bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots); 2847 sq->swq_last = sq->swq[swq_last].next_idx; 2848 } 2849 *pcqe = cqe; 2850 if (!(*budget) && swq_last != cqe_cons) { 2851 /* Out of budget */ 2852 rc = -EAGAIN; 2853 goto sq_done; 2854 } 2855 sq_done: 2856 if (rc) 2857 return rc; 2858 do_rq: 2859 cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx); 2860 if (cqe_cons == 0xFFFF) { 2861 goto done; 2862 } else if (cqe_cons > rq->max_wqe - 1) { 2863 dev_err(&cq->hwq.pdev->dev, 2864 "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n", 2865 cqe_cons, rq->max_wqe); 2866 rc = -EINVAL; 2867 goto done; 2868 } 2869 2870 if (qp->rq.flushed) { 2871 dev_dbg(&cq->hwq.pdev->dev, 2872 "%s: QP in Flush QP = %p\n", __func__, qp); 2873 rc = 0; 2874 goto done; 2875 } 2876 2877 /* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR 2878 * from the current rq->cons to the rq->prod regardless what the 2879 * rq->cons the terminal CQE indicates 2880 */ 2881 2882 /* Add qp to flush list of the CQ */ 2883 bnxt_qplib_add_flush_qp(qp); 2884 done: 2885 return rc; 2886 } 2887 2888 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq, 2889 struct cq_cutoff *hwcqe) 2890 { 2891 /* Check the Status */ 2892 if (hwcqe->status != CQ_CUTOFF_STATUS_OK) { 2893 dev_err(&cq->hwq.pdev->dev, 2894 "FP: CQ Process Cutoff Error status = 0x%x\n", 2895 hwcqe->status); 2896 return -EINVAL; 2897 } 2898 clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags); 2899 wake_up_interruptible(&cq->waitq); 2900 2901 return 0; 2902 } 2903 2904 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq, 2905 struct bnxt_qplib_cqe *cqe, 2906 int num_cqes) 2907 { 2908 struct bnxt_qplib_qp *qp = NULL; 2909 u32 budget = num_cqes; 2910 unsigned long flags; 2911 2912 spin_lock_irqsave(&cq->flush_lock, flags); 2913 list_for_each_entry(qp, &cq->sqf_head, sq_flush) { 2914 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp); 2915 __flush_sq(&qp->sq, qp, &cqe, &budget); 2916 } 2917 2918 list_for_each_entry(qp, &cq->rqf_head, rq_flush) { 2919 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp); 2920 __flush_rq(&qp->rq, qp, &cqe, &budget); 2921 } 2922 spin_unlock_irqrestore(&cq->flush_lock, flags); 2923 2924 return num_cqes - budget; 2925 } 2926 2927 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, 2928 int num_cqes, struct bnxt_qplib_qp **lib_qp) 2929 { 2930 struct cq_base *hw_cqe; 2931 u32 sw_cons, raw_cons; 2932 int budget, rc = 0; 2933 u8 type; 2934 2935 raw_cons = cq->hwq.cons; 2936 budget = num_cqes; 2937 2938 while (budget) { 2939 sw_cons = HWQ_CMP(raw_cons, &cq->hwq); 2940 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL); 2941 2942 /* Check for Valid bit */ 2943 if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements)) 2944 break; 2945 2946 /* 2947 * The valid test of the entry must be done first before 2948 * reading any further. 2949 */ 2950 dma_rmb(); 2951 /* From the device's respective CQE format to qplib_wc*/ 2952 type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK; 2953 switch (type) { 2954 case CQ_BASE_CQE_TYPE_REQ: 2955 rc = bnxt_qplib_cq_process_req(cq, 2956 (struct cq_req *)hw_cqe, 2957 &cqe, &budget, 2958 sw_cons, lib_qp); 2959 break; 2960 case CQ_BASE_CQE_TYPE_RES_RC: 2961 rc = bnxt_qplib_cq_process_res_rc(cq, 2962 (struct cq_res_rc *) 2963 hw_cqe, &cqe, 2964 &budget); 2965 break; 2966 case CQ_BASE_CQE_TYPE_RES_UD: 2967 rc = bnxt_qplib_cq_process_res_ud 2968 (cq, (struct cq_res_ud *)hw_cqe, &cqe, 2969 &budget); 2970 break; 2971 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1: 2972 rc = bnxt_qplib_cq_process_res_raweth_qp1 2973 (cq, (struct cq_res_raweth_qp1 *) 2974 hw_cqe, &cqe, &budget); 2975 break; 2976 case CQ_BASE_CQE_TYPE_TERMINAL: 2977 rc = bnxt_qplib_cq_process_terminal 2978 (cq, (struct cq_terminal *)hw_cqe, 2979 &cqe, &budget); 2980 break; 2981 case CQ_BASE_CQE_TYPE_CUT_OFF: 2982 bnxt_qplib_cq_process_cutoff 2983 (cq, (struct cq_cutoff *)hw_cqe); 2984 /* Done processing this CQ */ 2985 goto exit; 2986 default: 2987 dev_err(&cq->hwq.pdev->dev, 2988 "process_cq unknown type 0x%lx\n", 2989 hw_cqe->cqe_type_toggle & 2990 CQ_BASE_CQE_TYPE_MASK); 2991 rc = -EINVAL; 2992 break; 2993 } 2994 if (rc < 0) { 2995 if (rc == -EAGAIN) 2996 break; 2997 /* Error while processing the CQE, just skip to the 2998 * next one 2999 */ 3000 if (type != CQ_BASE_CQE_TYPE_TERMINAL) 3001 dev_err(&cq->hwq.pdev->dev, 3002 "process_cqe error rc = 0x%x\n", rc); 3003 } 3004 raw_cons++; 3005 } 3006 if (cq->hwq.cons != raw_cons) { 3007 cq->hwq.cons = raw_cons; 3008 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ); 3009 } 3010 exit: 3011 return num_cqes - budget; 3012 } 3013 3014 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type) 3015 { 3016 if (arm_type) 3017 bnxt_qplib_ring_db(&cq->dbinfo, arm_type); 3018 /* Using cq->arm_state variable to track whether to issue cq handler */ 3019 atomic_set(&cq->arm_state, 1); 3020 } 3021 3022 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp) 3023 { 3024 flush_workqueue(qp->scq->nq->cqn_wq); 3025 if (qp->scq != qp->rcq) 3026 flush_workqueue(qp->rcq->nq->cqn_wq); 3027 } 3028