/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}
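
/*
 * Note: when a QP uses one CQ for both SQ and RQ (scq == rcq), only
 * scq->flush_lock is actually taken; the bare __acquire()/__release()
 * calls above exist purely to keep sparse's lock-balance accounting
 * consistent for that case and generate no code.
 */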
113 
114 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
115 {
116 	unsigned long flags;
117 
118 	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
119 	__bnxt_qplib_add_flush_qp(qp);
120 	bnxt_qplib_release_cq_flush_locks(qp, &flags);
121 }
122 
123 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
124 {
125 	if (qp->sq.flushed) {
126 		qp->sq.flushed = false;
127 		list_del(&qp->sq_flush);
128 	}
129 	if (!qp->srq) {
130 		if (qp->rq.flushed) {
131 			qp->rq.flushed = false;
132 			list_del(&qp->rq_flush);
133 		}
134 	}
135 }
136 
137 void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
138 {
139 	unsigned long flags;
140 
141 	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
142 	__clean_cq(qp->scq, (u64)(unsigned long)qp);
143 	qp->sq.hwq.prod = 0;
144 	qp->sq.hwq.cons = 0;
145 	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
146 	qp->rq.hwq.prod = 0;
147 	qp->rq.hwq.cons = 0;
148 
149 	__bnxt_qplib_del_flush_qp(qp);
150 	bnxt_qplib_release_cq_flush_locks(qp, &flags);
151 }
152 
153 static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
154 {
155 	struct bnxt_qplib_nq_work *nq_work =
156 			container_of(work, struct bnxt_qplib_nq_work, work);
157 
158 	struct bnxt_qplib_cq *cq = nq_work->cq;
159 	struct bnxt_qplib_nq *nq = nq_work->nq;
160 
161 	if (cq && nq) {
162 		spin_lock_bh(&cq->compl_lock);
163 		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
164 			dev_dbg(&nq->pdev->dev,
165 				"%s:Trigger cq  = %p event nq = %p\n",
166 				__func__, cq, nq);
167 			nq->cqn_handler(nq, cq);
168 		}
169 		spin_unlock_bh(&cq->compl_lock);
170 	}
171 	kfree(nq_work);
172 }
173 
174 static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
175 				       struct bnxt_qplib_qp *qp)
176 {
177 	struct bnxt_qplib_q *rq = &qp->rq;
178 	struct bnxt_qplib_q *sq = &qp->sq;
179 
180 	if (qp->rq_hdr_buf)
181 		dma_free_coherent(&res->pdev->dev,
182 				  rq->max_wqe * qp->rq_hdr_buf_size,
183 				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
184 	if (qp->sq_hdr_buf)
185 		dma_free_coherent(&res->pdev->dev,
186 				  sq->max_wqe * qp->sq_hdr_buf_size,
187 				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
188 	qp->rq_hdr_buf = NULL;
189 	qp->sq_hdr_buf = NULL;
190 	qp->rq_hdr_buf_map = 0;
191 	qp->sq_hdr_buf_map = 0;
192 	qp->sq_hdr_buf_size = 0;
193 	qp->rq_hdr_buf_size = 0;
194 }
195 
196 static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
197 				       struct bnxt_qplib_qp *qp)
198 {
199 	struct bnxt_qplib_q *rq = &qp->rq;
200 	struct bnxt_qplib_q *sq = &qp->sq;
201 	int rc = 0;
202 
203 	if (qp->sq_hdr_buf_size && sq->max_wqe) {
204 		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
205 					sq->max_wqe * qp->sq_hdr_buf_size,
206 					&qp->sq_hdr_buf_map, GFP_KERNEL);
207 		if (!qp->sq_hdr_buf) {
208 			rc = -ENOMEM;
209 			dev_err(&res->pdev->dev,
210 				"Failed to create sq_hdr_buf\n");
211 			goto fail;
212 		}
213 	}
214 
215 	if (qp->rq_hdr_buf_size && rq->max_wqe) {
216 		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
217 						    rq->max_wqe *
218 						    qp->rq_hdr_buf_size,
219 						    &qp->rq_hdr_buf_map,
220 						    GFP_KERNEL);
221 		if (!qp->rq_hdr_buf) {
222 			rc = -ENOMEM;
223 			dev_err(&res->pdev->dev,
224 				"Failed to create rq_hdr_buf\n");
225 			goto fail;
226 		}
227 	}
228 	return 0;
229 
230 fail:
231 	bnxt_qplib_free_qp_hdr_buf(res, qp);
232 	return rc;
233 }
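
/*
 * Header buffers are only used by special QPs (QP1/GSI and raw
 * Ethertype QPs) that need software-built headers. A caller-side
 * sketch, assuming the size constants the bnxt_re verbs layer takes
 * from qplib_fp.h:
 *
 *	qp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
 *	qp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
 *	rc = bnxt_qplib_create_qp1(res, qp);
 *
 * bnxt_qplib_create_qp1() invokes bnxt_qplib_alloc_qp_hdr_buf()
 * internally, so callers normally never call it directly.
 */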

static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		raw_cons++;
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}
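
/*
 * Worst case the loop above polls 100 times with usleep_range(50, 100)
 * between attempts, i.e. it waits roughly 5-10 ms for outstanding CNQ
 * events before giving up silently.
 */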

static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, cq))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq,
					     (struct bnxt_qplib_srq *)q_handle,
					     nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		raw_cons++;
	}
	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	}
	spin_unlock_bh(&hwq->lock);
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	tasklet_disable(&nq->nq_tasklet);
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	if (nq->requested) {
		irq_set_affinity_hint(nq->msix_vec, NULL);
		free_irq(nq->msix_vec, nq);
		nq->requested = false;
	}
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc)
		return rc;

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);

	return rc;
}
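
/*
 * Stop/start are paired for IRQ migration as well as full teardown.
 * A restart sketch (e.g. after the MSI-X vector changes):
 *
 *	bnxt_qplib_nq_stop_irq(nq, false);	// keep the tasklet alive
 *	bnxt_qplib_nq_start_irq(nq, nq_indx, new_vec, false);
 *
 * need_init = false re-enables the existing tasklet instead of
 * running tasklet_setup() again; kill = true is only used on the
 * bnxt_qplib_disable_nq() path.
 */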

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;
	int rc = 0;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "NQ BAR region %d resc start is 0!\n",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "NQ BAR region %d mapping failed\n",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;
fail:
	return rc;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc = -1;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed\n");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}
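
/*
 * NQ bringup order used by callers of this file, as a sketch (the
 * handler names are placeholders for caller-supplied callbacks):
 *
 *	rc = bnxt_qplib_alloc_nq(res, nq);		// ring memory
 *	rc = bnxt_qplib_enable_nq(pdev, nq, idx, msix, db_offset,
 *				  my_cqn_handler, my_srqn_handler);
 *
 * Teardown reverses this: bnxt_qplib_disable_nq() followed by
 * bnxt_qplib_free_nq().
 */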

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);
exit:
	return rc;
}
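
/*
 * A hedged usage sketch for SRQ creation; the caller fills the qplib
 * SRQ from the verbs attributes before calling in:
 *
 *	srq->dpi = pd_dpi;		// doorbell page, caller-owned
 *	srq->max_wqe = attr_max_wr;	// ring depth requested
 *	srq->wqe_size = wqe_size;	// per-WQE size in bytes
 *	srq->threshold = srq_limit;	// 0 disables ARMENA at create
 *	rc = bnxt_qplib_create_srq(res, srq);
 *
 * The right-hand identifiers are placeholders, not fields defined in
 * this file.
 */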

int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 sw_prod, sw_cons, count = 0;

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);

	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}
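
/*
 * The occupancy math above handles producer wraparound. For example,
 * with max_elements = 1024, sw_prod = 10 and sw_cons = 1000:
 * count = 1024 - 1000 + 10 = 34 outstanding WQEs. The doorbell is
 * armed immediately only when count exceeds the SRQ threshold;
 * otherwise arming is deferred until bnxt_qplib_post_srq_recv()
 * refills the queue past the threshold.
 */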

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc = 0;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf->sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	/* Only trust the side buffer if the command succeeded */
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);

	return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 sw_prod, sw_cons, count = 0;
	int i, rc = 0, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		rc = -EINVAL;
		spin_unlock(&srq_hwq->lock);
		goto done;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	srq_hwq->prod++;

	spin_lock(&srq_hwq->lock);
	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	/* The lock is only required to read srq_hwq->cons consistently;
	 * sw_prod is re-sampled here so both indices feeding the
	 * occupancy calculation are taken under the same lock.
	 */
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}
done:
	return rc;
}
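
/*
 * Posting sketch (hedged): the caller builds a bnxt_qplib_swqe with
 * type BNXT_QPLIB_SWQE_TYPE_RECV, a wr_id cookie and num_sge entries
 * in sg_list (addr/lkey/size), then calls
 * bnxt_qplib_post_srq_recv(srq, &wqe). -EINVAL here means the SRQ
 * software free list is exhausted.
 */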

/* QP */

static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int rc = 0;
	int indx;

	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq) {
		rc = -ENOMEM;
		goto out;
	}

	que->swq_start = 0;
	que->swq_last = que->max_wqe - 1;
	for (indx = 0; indx < que->max_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;
out:
	return rc;
}
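
/*
 * swq[] is maintained as a circular list linked through next_idx:
 * swq_start is the next entry handed to a producer and swq_last the
 * oldest outstanding entry; both start at 0, i.e. an empty queue.
 */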

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
exit:
	return rc;
}

static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}
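
/*
 * The PSN search area lives in the aux pages allocated just past the
 * SQ proper (hence indexing at hwq->depth), so the first entry may
 * start mid-page; pad_pgofft records that offset in units of the
 * entry size so bnxt_qplib_pull_psn_buff() can index entries page by
 * page.
 */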

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth =  xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
exit:
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW require the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, ETH_ALEN);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc = 0;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
				  true : false;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, ETH_ALEN);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, ETH_ALEN);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct cq_base *hw_cqe;
	int i;

	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);

}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			goto bad;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
					(sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
bad:
	return -ENOMEM;
}
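
/*
 * Inline data is packed directly into 16-byte WQE slots
 * (sizeof(struct sq_sge) == 16). For example, a 40-byte inline
 * payload spans three slots (16 + 16 + 8); -ENOMEM is returned as
 * soon as the running total exceeds qp->max_inline_data.
 */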

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}

static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* sq_send_hdr is a slight misnomer here: the RQ WQE header has
	 * the same size, so this calculation holds for both queues.
	 */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}
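
/*
 * Slot math example: a send WQE with two SGEs needs
 * sizeof(struct sq_send_hdr) + 2 * sizeof(struct sq_sge) = 48 bytes,
 * i.e. 3 of the 16-byte slots reported via *wqe_sz. In static WQE
 * mode every WQE still occupies a fixed 8 slots (128 bytes) of ring
 * space, which is the value returned.
 */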

static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;

	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}

int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, qdf = 0;
	void *base_hdr;
	void *ext_hdr;
	__le32 temp32;
	u32 wqe_idx;
	u32 slots;
	u16 idx;

	hwq = &sq->hwq;
	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) is in the 0x%x state\n",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
		dev_err(&hwq->pdev->dev,
			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
	bnxt_qplib_pull_psn_buff(sq, swq);

	idx = 0;
	swq->slot_idx = hwq->prod;
	swq->slots = slots;
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	swq->start_psn = sq->psn & BTH_PSN_MASK;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
		/* Copy the inline data */
		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
	else
		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
					       &idx);
	if (data_len < 0)
		goto queue_err;
	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
			/* Assemble info for Raw Ethertype QPs */

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_sz;
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		fallthrough;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
		struct sq_send_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->length = cpu_to_le32(data_len);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
						      SQ_SEND_DST_QP_MASK);
			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
						    SQ_SEND_AVID_MASK);
		} else {
			sqe->length = cpu_to_le32(data_len);
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
		struct sq_rdma_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
		struct sq_fr_pmr_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
1891 				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1892 		sqe->zero_based_page_size_log =
1893 			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1894 			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1895 			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1896 		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1897 		temp32 = cpu_to_le32(wqe->frmr.length);
1898 		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1899 		sqe->numlevels_pbl_page_size_log =
1900 			((wqe->frmr.pbl_pg_sz_log <<
1901 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1902 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1903 			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1904 					SQ_FR_PMR_NUMLEVELS_MASK);
1905 
1906 		for (i = 0; i < wqe->frmr.page_list_len; i++)
1907 			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1908 						wqe->frmr.page_list[i] |
1909 						PTU_PTE_VALID);
1910 		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1911 		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
1912 
1913 		break;
1914 	}
1915 	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1916 	{
1917 		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
1918 		struct sq_bind_hdr *sqe = base_hdr;
1919 
1920 		sqe->wqe_type = wqe->type;
1921 		sqe->flags = wqe->flags;
1922 		sqe->access_cntl = wqe->bind.access_cntl;
1923 		sqe->mw_type_zero_based = wqe->bind.mw_type |
1924 			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1925 		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1926 		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1927 		ext_sqe->va = cpu_to_le64(wqe->bind.va);
1928 		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
1929 		break;
1930 	}
1931 	default:
1932 		/* Bad wqe, return error */
1933 		rc = -EINVAL;
1934 		goto done;
1935 	}
1936 	swq->next_psn = sq->psn & BTH_PSN_MASK;
1937 	bnxt_qplib_fill_psn_search(qp, wqe, swq);
1938 queue_err:
1939 	bnxt_qplib_swq_mod_start(sq, wqe_idx);
1940 	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
1941 	qp->wqe_cnt++;
1942 done:
1943 	if (sch_handler) {
1944 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1945 		if (nq_work) {
1946 			nq_work->cq = qp->scq;
1947 			nq_work->nq = qp->scq->nq;
1948 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1949 			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
1950 		} else {
1951 			dev_err(&hwq->pdev->dev,
1952 				"FP: Failed to allocate SQ nq_work!\n");
1953 			rc = -ENOMEM;
1954 		}
1955 	}
1956 	return rc;
1957 }
1958 
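/* Publish the RQ producer index to HW after posting receive WQEs. */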
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
}

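/* Post one receive WQE to the RQ.  Every RQE occupies a fixed number
 * of 16B slots (rq->dbinfo.max_slot); a WQE with no SGEs still posts
 * a single zero-length SGE so the WQE size stays valid.
 */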
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe_hdr *base_hdr;
	struct rq_ext_hdr *ext_hdr;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, idx;
	u32 wqe_idx;
	int rc = 0;

	hwq = &rq->hwq;
	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) is in the 0x%x state\n",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) RQ is full!\n", qp->id);
		rc = -EINVAL;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
	swq->wr_id = wqe->wr_id;
	swq->slots = rq->dbinfo.max_slot;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s: Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	idx = 0;
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));
	wqe_sz = (sizeof(struct rq_wqe_hdr) +
		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
	if (!wqe->num_sge) {
		struct sq_sge *sge;

		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
		sge->size = 0;
		wqe_sz++;
	}
	base_hdr->wqe_type = wqe->type;
	base_hdr->flags = wqe->flags;
	base_hdr->wqe_size = wqe_sz;
	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
queue_err:
	bnxt_qplib_swq_mod_start(rq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate RQ nq_work!\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}

/* CQ */
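/* Allocate the CQ ring and issue CREATE_CQ to the firmware.  On
 * success the CQ id, doorbell info and flush lists are set up and
 * arming is enabled via the ARMENA doorbell.
 */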
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 pg_sz_lvl;
	int rc;

	/* Check the DPI up front so an early error return cannot leak
	 * the hwq allocated below.
	 */
	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"FP: CREATE_CQ failed due to NULL DPI\n");
		return -EINVAL;
	}

	hwq_attr.res = res;
	hwq_attr.depth = cq->max_wqe;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	hwq_attr.sginfo = &cq->sg_info;
	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_CQ,
				 sizeof(req));

	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);
	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
	pbl = &cq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);
	spin_lock_init(&cq->flush_lock);

	cq->dbinfo.hwq = &cq->hwq;
	cq->dbinfo.xid = cq->id;
	cq->dbinfo.db = cq->dpi->dbr;
	cq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;

	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);

	return 0;

fail:
	bnxt_qplib_free_hwq(res, &cq->hwq);
exit:
	return rc;
}

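/* Finish a CQ resize once the cut-off CQE has been seen: free the old
 * ring and switch the CQ over to the ring allocated by
 * bnxt_qplib_resize_cq().
 */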
void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_cq *cq)
{
	bnxt_qplib_free_hwq(res, &cq->hwq);
	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
}

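/* Allocate a new ring of new_cqes entries and issue RESIZE_CQ.  The
 * old ring stays in use until HW delivers the cut-off CQE, after
 * which bnxt_qplib_resize_cq_complete() swaps the rings.
 */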
int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
			 int new_cqes)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_resize_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_resize_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 pg_sz, lvl, new_sz;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_RESIZE_CQ,
				 sizeof(req));
	hwq_attr.sginfo = &cq->sg_info;
	hwq_attr.res = res;
	hwq_attr.depth = new_cqes;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
	if (rc)
		return rc;

	req.cq_cid = cpu_to_le32(cq->id);
	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
				       CMDQ_RESIZE_CQ_LVL_MASK;
	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}

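/* Issue DESTROY_CQ, then wait until every CNQ event the firmware
 * reported for this CQ has been consumed before freeing the ring.
 */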
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_cq req = {};
	u16 total_cnq_events;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_CQ,
				 sizeof(req));

	req.cq_cid = cpu_to_le32(cq->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
	__wait_for_all_nqes(cq, total_cnq_events);
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return 0;
}

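/* Fabricate FLUSHED_ERR completions for every outstanding SQE between
 * swq_start and swq_last, skipping internal FENCE WQEs.  Returns
 * -EAGAIN when the caller's CQE budget runs out before the SQ drains.
 */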
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	start = sq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = sq->swq_last;
		if (start == last)
			break;
		/* Skip the FENCE WQE completions */
		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[last].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[last].type;
		cqe++;
		(*budget)--;
skip_compl:
		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots);
		sq->swq_last = sq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && sq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

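/* Fabricate FLUSHED_ERR completions for all outstanding RQEs, using
 * the responder CQE opcode that matches the QP type.  Returns -EAGAIN
 * if the budget is exhausted before the RQ drains.
 */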
static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int opcode = 0;
	int rc = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
	case CMDQ_CREATE_QP_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	start = rq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = rq->swq_last;
		if (last == start)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = rq->swq[last].wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots);
		rq->swq_last = rq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!*budget && rq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}

/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
 *       CQE is tracked from sw_cq_cons to max_element but is valid only
 *       if VALID=1
 */
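/* Phantom-CQE workaround (WA 9060): when the PSN search entry for the
 * completed WQE is still marked, arm the CQ, enable phantom processing
 * and ask the caller to retry (-EAGAIN).  While in "condition" mode,
 * peek ahead in the CQ for the REQ CQE that consumes the FENCE WQE;
 * once that phantom shows up, drop back to completing one WQE at a
 * time (sq->single) until the SQ catches up.
 */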
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	struct bnxt_qplib_swq *swq;
	struct cq_base *peek_hwcqe;
	int i, rc = 0;

	/* Normal mode */
	/* Check for the psn_search marking before completing */
	swq = &sq->swq[swq_last];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
				     & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_raw_cq_cons = cq->hwq.cons;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_sw_cq_cons = HWQ_CMP(peek_sw_cq_cons, &cq->hwq);
			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
						       peek_sw_cq_cons, NULL);
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
					  cq->hwq.max_elements)) {
				/*
				 * The valid test of the entry must be done
				 * first before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				    CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)
						 le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						((le16_to_cpu(
						  peek_req_hwcqe->sq_cons_idx)
						  - 1) % sq->max_wqe);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 *  Unbreak only if the phantom
						 *  comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = -EINVAL;
				goto out;
			}
			peek_sw_cq_cons++;
			peek_raw_cq_cons++;
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}

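/* Process one REQ (send-side) CQE.  Because the HW aggregates
 * completions, fabricate a CQE for every signaled SWQE from the
 * current consumer up to the index the hardware reports, applying an
 * error status only to the final one.
 */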
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	u32 cqe_sq_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	/* We need to walk the sq's swq and fabricate CQEs for all
	 * previously signaled SWQEs from the current sq cons up to
	 * cqe_sq_cons, because the HW may have aggregated them into
	 * this single CQE.
	 */
	cqe = *pcqe;
	while (*budget) {
		if (sq->swq_last == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sq->swq_last];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status.  For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (swq->next_idx == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sq->swq_last, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			/* Before we complete, do WA 9060 */
			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
				      cqe_sq_cons)) {
				*lib_qp = qp;
				goto out;
			}
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
		sq->swq_last = swq->next_idx;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (sq->swq_last != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}

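/* Return an SRQE tag to the SRQ free list (linked through next_idx)
 * and credit the SRQ consumer counter.
 */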
static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	srq->hwq.cons++; /* Support for SRQE counter */
	spin_unlock(&srq->hwq.lock);
}

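/* Translate one RC responder CQE into a bnxt_qplib_cqe, completing
 * either an SRQ entry (located by its tag) or the RQ entry at
 * swq_last.  A non-OK status moves the QP to ERR and queues it for
 * flushing.
 */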
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (wr_id_idx != rq->swq_last)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return rc;
}

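/* Translate one UD responder CQE; in addition to the usual fields this
 * recovers the source QPN (split across two CQE words) and the source
 * MAC for address resolution.
 */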
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	/* FIXME: Endianness fix needed for smac */
	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
				  ((le32_to_cpu(
				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}

		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}
done:
	return rc;
}

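/* Returns true when the CQE at the current consumer index is not yet
 * valid, i.e. the CQ has nothing pending.
 */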
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;
	u32 sw_cons, raw_cons;
	bool rc = true;

	raw_cons = cq->hwq.cons;
	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
	return rc;
}

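/* Translate one raw-Ethernet/QP1 responder CQE, carrying across the
 * raweth flags and metadata that the GSI receive path needs.
 */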
static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, ETH_ALEN);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return rc;
}

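/* A TERMINAL CQE stops the QP: complete any aggregated successful SQ
 * CQEs up to the reported consumer index, then queue the QP on the
 * flush lists so remaining SQ/RQ entries complete with FLUSHED_ERR.
 */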
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 swq_last = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process terminal qp is NULL\n");
		return -EINVAL;
	}

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;
	cqe_cons %= sq->max_wqe;

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* Terminal CQE can also include aggregated successful CQEs prior.
	 * So we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		swq_last = sq->swq_last;
		if (swq_last == cqe_cons)
			break;
		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[swq_last].wr_id;
			cqe->type = sq->swq[swq_last].type;
			cqe++;
			(*budget)--;
		}
		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
		sq->swq_last = sq->swq[swq_last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && swq_last != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->max_wqe - 1) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->max_wqe);
		rc = -EINVAL;
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
	 * from the current rq->cons to the rq->prod, regardless of the
	 * rq->cons value the terminal CQE indicates
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}

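/* A CUT_OFF CQE marks the end of the old ring of a resized CQ; clear
 * the resize-in-progress flag and wake up the waiter.
 */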
static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}

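/* Generate flush completions for every QP queued on this CQ's SQ and
 * RQ flush lists.  Returns the number of CQEs written, at most
 * num_cqes.
 */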
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	int budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP = %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP = %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}

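/* Poll up to num_cqes completions into the cqe array.  Each valid
 * hardware CQE is dispatched to its type-specific handler; a CUT_OFF
 * CQE ends processing for a resize.  When the phantom-WQE workaround
 * (do_wa9060()) defers a completion, *lib_qp returns the affected QP
 * so the caller can handle the phantom recovery and re-poll.  Returns
 * the number of CQEs filled.
 */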
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe;
	u32 sw_cons, raw_cons;
	int budget, rc = 0;
	u8 type;

	raw_cons = cq->hwq.cons;
	budget = num_cqes;

	while (budget) {
		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		/* Translate the device's CQE format into qplib_wc */
		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
		switch (type) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       sw_cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud(cq,
							  (struct cq_res_ud *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1(cq,
					(struct cq_res_raweth_qp1 *)hw_cqe,
					&cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal(cq,
					(struct cq_terminal *)hw_cqe,
					&cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff(cq,
					(struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%lx\n",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
				dev_err(&cq->hwq.pdev->dev,
					"process_cqe error rc = 0x%x\n", rc);
		}
		raw_cons++;
	}
	if (cq->hwq.cons != raw_cons) {
		cq->hwq.cons = raw_cons;
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
	}
exit:
	return num_cqes - budget;
}

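/* Arm the CQ for the next notification (solicited or all completions,
 * per arm_type).
 */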
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	if (arm_type)
		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Use cq->arm_state to track whether to invoke the cq handler */
	atomic_set(&cq->arm_state, 1);
}

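/* Drain any CQ notification work already scheduled for the QP's send
 * and receive CQs.
 */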
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}