/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}
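
/* QP1/GSI header buffers: the stack builds MAD and UD headers in a
 * DMA-coherent area with one hdr_buf_size slot per WQE on each of the
 * SQ and RQ; the helpers below allocate and free that area as a whole.
 */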
static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u32 hw_polled = 0;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, (cq)))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq,
					     (struct bnxt_qplib_srq *)q_handle,
					     nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
}

/* bnxt_re_synchronize_nq - self polling notification queue.
 * @nq - notification queue pointer
 *
 * This function will start polling entries of a given notification queue
 * for all pending entries.
 * This function is useful to synchronize notification entries while resources
 * are going away.
 */
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}
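
/* Quiesce the NQ: mask the hardware doorbell, synchronize with any
 * in-flight IRQ handler, then release the vector. The tasklet is
 * killed only when @kill is true (teardown rather than reset).
 */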
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->dbinfo.flags = 0;
	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}
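
/* Bring an NQ into service: register the CQN/SRQN callbacks, map the
 * notification doorbell and request the MSI-X vector; any failure
 * unwinds through bnxt_qplib_disable_nq().
 */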
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}
	srq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}

int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 count;

	count = __bnxt_qplib_get_avail(srq_hwq);
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}
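
/* Post one receive WQE to the SRQ: claim a free software slot, build
 * the hardware WQE, ring the producer doorbell and, if arming was
 * deferred, re-arm once occupancy is back above the threshold.
 */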
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 count = 0;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);

	spin_lock(&srq_hwq->lock);
	count = __bnxt_qplib_get_avail(srq_hwq);
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}

	return 0;
}

/* QP */

static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_wqe - 1;
	for (indx = 0; indx < que->max_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	if (res->dattr)
		qp->dev_cap_flags = res->dattr->dev_cap_flags;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);

		if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
			psn_sz = sizeof(struct sq_msn_search);
			qp->msn = 0;
		}
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
				    : 0;
	/* Update msn tbl size */
	if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
		hwq_attr.aux_depth =
			roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
		qp->msn_tbl_sz = hwq_attr.aux_depth;
		qp->msn = 0;
	}

	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
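		/* Static WQE mode uses a fixed 128B RQ WQE: two 16B header
		 * slots plus six 16B SGE slots, hence the hard-coded 6.
		 */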
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}
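
/* Firmware rejects MODIFY_QP attribute masks that are not legal for the
 * current state transition, so trim modify_flags (and patch in required
 * defaults) before issuing the command.
 */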
static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}
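
/* Read the QP context back from firmware through a DMA side buffer and
 * refresh the software copy of the QP attributes from it.
 */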
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	u32 peek_flags, peek_cons;
	struct cq_base *hw_cqe;
	int i;

	peek_flags = cq->dbinfo.flags;
	peek_cons = cq_hwq->cons;
	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
					 1, &peek_flags);
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	spin_lock_bh(&rcfw->tbl_lock);
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
	spin_unlock_bh(&rcfw->tbl_lock);

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		spin_lock_bh(&rcfw->tbl_lock);
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		spin_unlock_bh(&rcfw->tbl_lock);
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

/* Fill the MSN table into the next psn row */
static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_msn_search *msns;
	u32 start_psn, next_psn;
	u16 start_idx;

	msns = (struct sq_msn_search *)swq->psn_search;
	msns->start_idx_next_psn_start_psn = 0;

	start_psn = swq->start_psn;
	next_psn = swq->next_psn;
	start_idx = swq->slot_idx;
	msns->start_idx_next_psn_start_psn |=
		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
	qp->msn++;
	qp->msn %= qp->msn_tbl_sz;
}

static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	/* Handle MSN differently on cap flags */
	if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
		bnxt_qplib_fill_msn_search(qp, wqe, swq);
		return;
	}
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
				      (sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}
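
/* Compute how many 16B slots a WQE occupies (header plus SGEs or inline
 * data) and the queue-full delta. In static WQE mode every WQE consumes
 * a fixed 8 slots (128B) regardless of payload.
 */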
static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Adding sq_send_hdr is a misnomer, for rq also hdr size is same. */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}

static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq, bool hw_retx)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	if (hw_retx) {
		/* For HW retx use qp msn index */
		tail = qp->msn;
		tail %= qp->msn_tbl_sz;
	}
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;

	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}

int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, qdf = 0;
	bool msn_update;
	void *base_hdr;
	void *ext_hdr;
	__le32 temp32;
	u32 wqe_idx;
	u32 slots;
	u16 idx;

	hwq = &sq->hwq;
	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
		dev_err(&hwq->pdev->dev,
			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
	bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));

	idx = 0;
	swq->slot_idx = hwq->prod;
	swq->slots = slots;
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	swq->start_psn = sq->psn & BTH_PSN_MASK;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}
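
	/* Every WQE begins with two 16B header slots (base + extended);
	 * inline data or SGEs are laid down from the third slot onwards.
	 */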
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
		/* Copy the inline data */
		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
	else
		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
					       &idx);
	if (data_len < 0)
		goto queue_err;
	/* Make sure we update MSN table only for wired wqes */
	msn_update = true;
	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
			/* Assemble info for Raw Ethertype QPs */

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_sz;
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
			   SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
			   SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		fallthrough;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
		struct sq_send_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->length = cpu_to_le32(data_len);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
						      SQ_SEND_DST_QP_MASK);
			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
						    SQ_SEND_AVID_MASK);
			msn_update = false;
		} else {
			sqe->length = cpu_to_le32(data_len);
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
		struct sq_rdma_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
		msn_update = false;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
		struct sq_fr_pmr_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
			  SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
			 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
			 SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
		msn_update = false;

		break;
	}
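	/*
	 * Note on the FAST_REG_MR case above: the loop stamps PTU_PTE_VALID
	 * into each entry of the caller's wqe->frmr.pbl_ptr[] in place,
	 * converting the page list to little-endian PTEs before handing the
	 * table's DMA address to the device via ext_sqe->pblptr.
	 */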
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
		struct sq_bind_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		ext_sqe->va = cpu_to_le64(wqe->bind.va);
		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
		msn_update = false;
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	if (!BNXT_RE_HW_RETX(qp->dev_cap_flags) || msn_update) {
		swq->next_psn = sq->psn & BTH_PSN_MASK;
		bnxt_qplib_fill_psn_search(qp, wqe, swq);
	}
queue_err:
	bnxt_qplib_swq_mod_start(sq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
	qp->wqe_cnt++;
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate SQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}
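/*
 * Minimal usage sketch for the two routines above (hypothetical caller,
 * locking and error handling elided): post one or more WQEs, then ring
 * the SQ doorbell once for the batch.
 *
 *	struct bnxt_qplib_swqe wqe = {};
 *
 *	wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
 *	wqe.wr_id = my_cookie;			// caller-defined cookie
 *	rc = bnxt_qplib_post_send(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_send_db(qp);
 */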
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
}

int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe_hdr *base_hdr;
	struct rq_ext_hdr *ext_hdr;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, idx;
	u32 wqe_idx;
	int rc = 0;

	hwq = &rq->hwq;
	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) RQ is full!\n", qp->id);
		rc = -EINVAL;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
	swq->wr_id = wqe->wr_id;
	swq->slots = rq->dbinfo.max_slot;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s: Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	idx = 0;
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));
	wqe_sz = (sizeof(struct rq_wqe_hdr) +
		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
	if (!wqe->num_sge) {
		struct sq_sge *sge;

		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
		sge->size = 0;
		wqe_sz++;
	}
	base_hdr->wqe_type = wqe->type;
	base_hdr->flags = wqe->flags;
	base_hdr->wqe_size = wqe_sz;
	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
queue_err:
	bnxt_qplib_swq_mod_start(rq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate RQ nq_work!\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}

/* CQ */
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 pg_sz_lvl;
	int rc;

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"FP: CREATE_CQ failed due to NULL DPI\n");
		return -EINVAL;
	}

	cq->dbinfo.flags = 0;
	hwq_attr.res = res;
	hwq_attr.depth = cq->max_wqe;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	hwq_attr.sginfo = &cq->sg_info;
	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
	if (rc)
		return rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_CQ,
				 sizeof(req));

	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);
	req.cq_size = cpu_to_le32(cq->max_wqe);
	pbl = &cq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);
	spin_lock_init(&cq->flush_lock);

	cq->dbinfo.hwq = &cq->hwq;
	cq->dbinfo.xid = cq->id;
	cq->dbinfo.db = cq->dpi->dbr;
	cq->dbinfo.priv_db = res->dpi_tbl.priv_db;

	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);

	return 0;

fail:
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return rc;
}
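/*
 * Assumed CQ lifecycle around bnxt_qplib_create_cq() (error paths elided):
 * create the CQ as above, arm it for notifications with
 * bnxt_qplib_req_notify_cq(), reap entries with bnxt_qplib_poll_cq(), and
 * tear it down with bnxt_qplib_destroy_cq(), which waits for all
 * outstanding CNQ events before freeing the hardware queue.
 */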
void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_cq *cq)
{
	bnxt_qplib_free_hwq(res, &cq->hwq);
	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
	/* Reset only the cons bit in the flags */
	cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
}

int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
			 int new_cqes)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_resize_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_resize_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 pg_sz, lvl, new_sz;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_RESIZE_CQ,
				 sizeof(req));
	hwq_attr.sginfo = &cq->sg_info;
	hwq_attr.res = res;
	hwq_attr.depth = new_cqes;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
	if (rc)
		return rc;

	req.cq_cid = cpu_to_le32(cq->id);
	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
	       CMDQ_RESIZE_CQ_LVL_MASK;
	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}

int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_cq req = {};
	u16 total_cnq_events;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_CQ,
				 sizeof(req));

	req.cq_cid = cpu_to_le32(cq->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
	__wait_for_all_nqes(cq, total_cnq_events);
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return 0;
}
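/*
 * The two flush helpers below fabricate completions for WQEs that the
 * hardware will never report once a QP has entered the error state:
 * every outstanding SQE/RQE is completed with a FLUSHED_ERR status,
 * bounded by the caller's CQE budget (-EAGAIN when the budget runs out).
 */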
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	start = sq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = sq->swq_last;
		if (start == last)
			break;
		/* Skip the FENCE WQE completions */
		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[last].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[last].type;
		cqe++;
		(*budget)--;
skip_compl:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[last].slots, &sq->dbinfo.flags);
		sq->swq_last = sq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && sq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int opcode = 0;
	int rc = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
	case CMDQ_CREATE_QP_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	start = rq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = rq->swq_last;
		if (last == start)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status =
			CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (unsigned long)qp;
		cqe->wr_id = rq->swq[last].wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 rq->swq[last].slots, &rq->dbinfo.flags);
		rq->swq_last = rq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!*budget && rq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}
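/*
 * do_wa9060() below implements workaround 9060 for out-of-order phantom
 * (fence) completions: when a marked psn_search entry is seen, normal
 * completion processing pauses (sq->condition) until the phantom CQE
 * itself arrives, after which exactly one more WQE is completed
 * (sq->single) before resuming normal mode.
 */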
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
 * CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
 */
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
	u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	struct bnxt_qplib_swq *swq;
	struct cq_base *peek_hwcqe;
	int i, rc = 0;

	/* Normal mode */
	/* Check for the psn_search marking before completing */
	swq = &sq->swq[swq_last];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
			 & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_flags = cq->dbinfo.flags;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
						       peek_sw_cq_cons, NULL);
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
				/*
				 * The valid test of the entry must be done
				 * first before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				     CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)
						 le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						((le16_to_cpu(
						  peek_req_hwcqe->sq_cons_idx)
						  - 1) % sq->max_wqe);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 * Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = -EINVAL;
				goto out;
			}
			bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
						 &peek_sw_cq_cons,
						 1, &peek_flags);
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}
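/*
 * Example of the CQE aggregation handled below: with sq->swq_last == 3
 * and a hardware CQE reporting cqe_sq_cons == 7, completions are
 * fabricated for SWQEs 3..6 (the signaled ones only), since the device
 * may coalesce several successful requests into a single CQE.
 */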
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	u32 cqe_sq_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	/* We must walk the sq's swq to fabricate CQEs for all previously
	 * signaled SWQEs, due to CQE aggregation, from the current sq cons
	 * to the cqe_sq_cons
	 */
	cqe = *pcqe;
	while (*budget) {
		if (sq->swq_last == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sq->swq_last];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status. For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (swq->next_idx == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sq->swq_last, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			/* Before we complete, do WA 9060 */
			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
				      cqe_sq_cons)) {
				*lib_qp = qp;
				goto out;
			}
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 swq->slots, &sq->dbinfo.flags);
		sq->swq_last = swq->next_idx;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (sq->swq_last != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}

static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
				 srq->dbinfo.max_slot, &srq->dbinfo.flags);
	spin_unlock(&srq->hwq.lock);
}
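/*
 * bnxt_qplib_release_srqe() above returns a completed SRQ element to the
 * software free list by linking its tag after the current last_idx and
 * advancing the SRQ consumer index, all under hwq.lock.
 */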
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
			CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (wr_id_idx != rq->swq_last)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}

static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	/* FIXME: Endianness fix needed for smac */
	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
		      ((le32_to_cpu(
			hwcqe->src_qp_high_srq_or_rq_wr_id) &
		       CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}

		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}
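/*
 * Note on the UD handler above: the 24-bit source QP number arrives
 * split across the CQE, so it is reassembled from the 16-bit src_qp_low
 * plus the high byte extracted from src_qp_high_srq_or_rq_wr_id.
 */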
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;
	bool rc = true;

	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
	return rc;
}

static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
		& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, 6);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 swq_last = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp)
		return -EINVAL;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;
	cqe_cons %= sq->max_wqe;

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* A terminal CQE can also include successful CQEs aggregated before
	 * it, so we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		swq_last = sq->swq_last;
		if (swq_last == cqe_cons)
			break;
		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[swq_last].wr_id;
			cqe->type = sq->swq[swq_last].type;
			cqe++;
			(*budget)--;
		}
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[swq_last].slots,
					 &sq->dbinfo.flags);
		sq->swq_last = sq->swq[swq_last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && swq_last != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->max_wqe - 1) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->max_wqe);
		rc = -EINVAL;
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* A terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
	 * from the current rq->cons to the rq->prod, regardless of what
	 * rq->cons the terminal CQE indicates
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}

static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}

int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	u32 budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}
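/*
 * Polling sketch for the routine below (hypothetical consumer, sizes
 * illustrative): reap up to an array's worth of CQEs, then re-arm.
 *
 *	struct bnxt_qplib_cqe cqes[16];
 *	struct bnxt_qplib_qp *lib_qp = NULL;
 *	int polled;
 *
 *	polled = bnxt_qplib_poll_cq(cq, cqes, 16, &lib_qp);
 *	if (!polled)
 *		bnxt_qplib_req_notify_cq(cq, DBC_DBC_TYPE_CQ_ARMALL);
 */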
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe;
	int budget, rc = 0;
	u32 hw_polled = 0;
	u8 type;

	budget = num_cqes;

	while (budget) {
		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		/* Translate from the device's CQE format to a qplib_wc */
		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
		switch (type) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       cq->hwq.cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%lx\n",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
				dev_err(&cq->hwq.pdev->dev,
					"process_cqe error rc = 0x%x\n", rc);
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
					 1, &cq->dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
exit:
	return num_cqes - budget;
}

void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	if (arm_type)
		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Using cq->arm_state variable to track whether to issue cq handler */
	atomic_set(&cq->arm_state, 1);
}

void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}