1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: Fast Path Operators
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/spinlock.h>
41 #include <linux/sched.h>
42 #include <linux/slab.h>
43 #include <linux/pci.h>
44 #include <linux/prefetch.h>
45 
46 #include "roce_hsi.h"
47 
48 #include "qplib_res.h"
49 #include "qplib_rcfw.h"
50 #include "qplib_sp.h"
51 #include "qplib_fp.h"
52 
53 static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
54 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
55 static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type);
56 
57 static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
58 {
59 	qp->sq.condition = false;
60 	qp->sq.send_phantom = false;
61 	qp->sq.single = false;
62 }
63 
64 /* Flush list */
65 static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
66 {
67 	struct bnxt_qplib_cq *scq, *rcq;
68 
69 	scq = qp->scq;
70 	rcq = qp->rcq;
71 
72 	if (!qp->sq.flushed) {
73 		dev_dbg(&scq->hwq.pdev->dev,
74 			"QPLIB: FP: Adding to SQ Flush list = %p",
75 			qp);
76 		bnxt_qplib_cancel_phantom_processing(qp);
77 		list_add_tail(&qp->sq_flush, &scq->sqf_head);
78 		qp->sq.flushed = true;
79 	}
80 	if (!qp->srq) {
81 		if (!qp->rq.flushed) {
82 			dev_dbg(&rcq->hwq.pdev->dev,
83 				"QPLIB: FP: Adding to RQ Flush list = %p",
84 				qp);
85 			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
86 			qp->rq.flushed = true;
87 		}
88 	}
89 }
90 
91 static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
92 				       unsigned long *flags)
93 	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
94 {
95 	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
96 	if (qp->scq == qp->rcq)
97 		__acquire(&qp->rcq->flush_lock);
98 	else
99 		spin_lock(&qp->rcq->flush_lock);
100 }
101 
102 static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
103 				       unsigned long *flags)
104 	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
105 {
106 	if (qp->scq == qp->rcq)
107 		__release(&qp->rcq->flush_lock);
108 	else
109 		spin_unlock(&qp->rcq->flush_lock);
110 	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
111 }
112 
113 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
114 {
115 	unsigned long flags;
116 
117 	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
118 	__bnxt_qplib_add_flush_qp(qp);
119 	bnxt_qplib_release_cq_flush_locks(qp, &flags);
120 }
121 
122 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
123 {
124 	if (qp->sq.flushed) {
125 		qp->sq.flushed = false;
126 		list_del(&qp->sq_flush);
127 	}
128 	if (!qp->srq) {
129 		if (qp->rq.flushed) {
130 			qp->rq.flushed = false;
131 			list_del(&qp->rq_flush);
132 		}
133 	}
134 }
135 
136 void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
137 {
138 	unsigned long flags;
139 
140 	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
141 	__clean_cq(qp->scq, (u64)(unsigned long)qp);
142 	qp->sq.hwq.prod = 0;
143 	qp->sq.hwq.cons = 0;
144 	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
145 	qp->rq.hwq.prod = 0;
146 	qp->rq.hwq.cons = 0;
147 
148 	__bnxt_qplib_del_flush_qp(qp);
149 	bnxt_qplib_release_cq_flush_locks(qp, &flags);
150 }
151 
152 static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
153 {
154 	struct bnxt_qplib_nq_work *nq_work =
155 			container_of(work, struct bnxt_qplib_nq_work, work);
156 
157 	struct bnxt_qplib_cq *cq = nq_work->cq;
158 	struct bnxt_qplib_nq *nq = nq_work->nq;
159 
160 	if (cq && nq) {
161 		spin_lock_bh(&cq->compl_lock);
162 		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
163 			dev_dbg(&nq->pdev->dev,
164 				"%s:Trigger cq  = %p event nq = %p\n",
165 				__func__, cq, nq);
166 			nq->cqn_handler(nq, cq);
167 		}
168 		spin_unlock_bh(&cq->compl_lock);
169 	}
170 	kfree(nq_work);
171 }
172 
173 static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
174 				       struct bnxt_qplib_qp *qp)
175 {
176 	struct bnxt_qplib_q *rq = &qp->rq;
177 	struct bnxt_qplib_q *sq = &qp->sq;
178 
179 	if (qp->rq_hdr_buf)
180 		dma_free_coherent(&res->pdev->dev,
181 				  rq->hwq.max_elements * qp->rq_hdr_buf_size,
182 				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
183 	if (qp->sq_hdr_buf)
184 		dma_free_coherent(&res->pdev->dev,
185 				  sq->hwq.max_elements * qp->sq_hdr_buf_size,
186 				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
187 	qp->rq_hdr_buf = NULL;
188 	qp->sq_hdr_buf = NULL;
189 	qp->rq_hdr_buf_map = 0;
190 	qp->sq_hdr_buf_map = 0;
191 	qp->sq_hdr_buf_size = 0;
192 	qp->rq_hdr_buf_size = 0;
193 }
194 
195 static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
196 				       struct bnxt_qplib_qp *qp)
197 {
198 	struct bnxt_qplib_q *rq = &qp->rq;
199 	struct bnxt_qplib_q *sq = &qp->rq;
200 	int rc = 0;
201 
202 	if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
203 		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
204 					sq->hwq.max_elements *
205 					qp->sq_hdr_buf_size,
206 					&qp->sq_hdr_buf_map, GFP_KERNEL);
207 		if (!qp->sq_hdr_buf) {
208 			rc = -ENOMEM;
209 			dev_err(&res->pdev->dev,
210 				"QPLIB: Failed to create sq_hdr_buf");
211 			goto fail;
212 		}
213 	}
214 
215 	if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
216 		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
217 						    rq->hwq.max_elements *
218 						    qp->rq_hdr_buf_size,
219 						    &qp->rq_hdr_buf_map,
220 						    GFP_KERNEL);
221 		if (!qp->rq_hdr_buf) {
222 			rc = -ENOMEM;
223 			dev_err(&res->pdev->dev,
224 				"QPLIB: Failed to create rq_hdr_buf");
225 			goto fail;
226 		}
227 	}
228 	return 0;
229 
230 fail:
231 	bnxt_qplib_free_qp_hdr_buf(res, qp);
232 	return rc;
233 }
234 
235 static void bnxt_qplib_service_nq(unsigned long data)
236 {
237 	struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
238 	struct bnxt_qplib_hwq *hwq = &nq->hwq;
239 	struct nq_base *nqe, **nq_ptr;
240 	struct bnxt_qplib_cq *cq;
241 	int num_cqne_processed = 0;
242 	int num_srqne_processed = 0;
243 	u32 sw_cons, raw_cons;
244 	u16 type;
245 	int budget = nq->budget;
246 	u64 q_handle;
247 
248 	/* Service the NQ until empty */
249 	raw_cons = hwq->cons;
250 	while (budget--) {
251 		sw_cons = HWQ_CMP(raw_cons, hwq);
252 		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
253 		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
254 		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
255 			break;
256 
257 		/*
258 		 * The valid test of the entry must be done first before
259 		 * reading any further.
260 		 */
261 		dma_rmb();
262 
263 		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
264 		switch (type) {
265 		case NQ_BASE_TYPE_CQ_NOTIFICATION:
266 		{
267 			struct nq_cn *nqcne = (struct nq_cn *)nqe;
268 
269 			q_handle = le32_to_cpu(nqcne->cq_handle_low);
270 			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
271 						     << 32;
272 			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
273 			bnxt_qplib_arm_cq_enable(cq);
274 			spin_lock_bh(&cq->compl_lock);
275 			atomic_set(&cq->arm_state, 0);
276 			if (!nq->cqn_handler(nq, (cq)))
277 				num_cqne_processed++;
278 			else
279 				dev_warn(&nq->pdev->dev,
280 					 "QPLIB: cqn - type 0x%x not handled",
281 					 type);
282 			spin_unlock_bh(&cq->compl_lock);
283 			break;
284 		}
285 		case NQ_BASE_TYPE_SRQ_EVENT:
286 		{
287 			struct nq_srq_event *nqsrqe =
288 						(struct nq_srq_event *)nqe;
289 
290 			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
291 			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
292 				     << 32;
293 			bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle,
294 					   DBR_DBR_TYPE_SRQ_ARMENA);
295 			if (!nq->srqn_handler(nq,
296 					      (struct bnxt_qplib_srq *)q_handle,
297 					      nqsrqe->event))
298 				num_srqne_processed++;
299 			else
300 				dev_warn(&nq->pdev->dev,
301 					 "QPLIB: SRQ event 0x%x not handled",
302 					 nqsrqe->event);
303 			break;
304 		}
305 		case NQ_BASE_TYPE_DBQ_EVENT:
306 			break;
307 		default:
308 			dev_warn(&nq->pdev->dev,
309 				 "QPLIB: nqe with type = 0x%x not handled",
310 				 type);
311 			break;
312 		}
313 		raw_cons++;
314 	}
315 	if (hwq->cons != raw_cons) {
316 		hwq->cons = raw_cons;
317 		NQ_DB_REARM(nq->bar_reg_iomem, hwq->cons, hwq->max_elements);
318 	}
319 }
320 
321 static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
322 {
323 	struct bnxt_qplib_nq *nq = dev_instance;
324 	struct bnxt_qplib_hwq *hwq = &nq->hwq;
325 	struct nq_base **nq_ptr;
326 	u32 sw_cons;
327 
328 	/* Prefetch the NQ element */
329 	sw_cons = HWQ_CMP(hwq->cons, hwq);
330 	nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
331 	prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
332 
333 	/* Fan out to CPU affinitized kthreads? */
334 	tasklet_schedule(&nq->worker);
335 
336 	return IRQ_HANDLED;
337 }
338 
339 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
340 {
341 	if (nq->cqn_wq) {
342 		destroy_workqueue(nq->cqn_wq);
343 		nq->cqn_wq = NULL;
344 	}
345 	/* Make sure the HW is stopped! */
346 	synchronize_irq(nq->vector);
347 	tasklet_disable(&nq->worker);
348 	tasklet_kill(&nq->worker);
349 
350 	if (nq->requested) {
351 		irq_set_affinity_hint(nq->vector, NULL);
352 		free_irq(nq->vector, nq);
353 		nq->requested = false;
354 	}
355 	if (nq->bar_reg_iomem)
356 		iounmap(nq->bar_reg_iomem);
357 	nq->bar_reg_iomem = NULL;
358 
359 	nq->cqn_handler = NULL;
360 	nq->srqn_handler = NULL;
361 	nq->vector = 0;
362 }
363 
364 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
365 			 int nq_idx, int msix_vector, int bar_reg_offset,
366 			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
367 					    struct bnxt_qplib_cq *),
368 			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
369 					     struct bnxt_qplib_srq *,
370 					     u8 event))
371 {
372 	resource_size_t nq_base;
373 	int rc = -1;
374 
375 	nq->pdev = pdev;
376 	nq->vector = msix_vector;
377 	if (cqn_handler)
378 		nq->cqn_handler = cqn_handler;
379 
380 	if (srqn_handler)
381 		nq->srqn_handler = srqn_handler;
382 
383 	tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
384 
385 	/* Have a task to schedule CQ notifiers in post send case */
386 	nq->cqn_wq  = create_singlethread_workqueue("bnxt_qplib_nq");
387 	if (!nq->cqn_wq)
388 		goto fail;
389 
390 	nq->requested = false;
391 	memset(nq->name, 0, 32);
392 	sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
393 	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
394 	if (rc) {
395 		dev_err(&nq->pdev->dev,
396 			"Failed to request IRQ for NQ: %#x", rc);
397 		goto fail;
398 	}
399 
400 	cpumask_clear(&nq->mask);
401 	cpumask_set_cpu(nq_idx, &nq->mask);
402 	rc = irq_set_affinity_hint(nq->vector, &nq->mask);
403 	if (rc) {
404 		dev_warn(&nq->pdev->dev,
405 			 "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
406 			 nq->vector, nq_idx);
407 	}
408 
409 	nq->requested = true;
410 	nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
411 	nq->bar_reg_off = bar_reg_offset;
412 	nq_base = pci_resource_start(pdev, nq->bar_reg);
413 	if (!nq_base) {
414 		rc = -ENOMEM;
415 		goto fail;
416 	}
417 	nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 4);
418 	if (!nq->bar_reg_iomem) {
419 		rc = -ENOMEM;
420 		goto fail;
421 	}
422 	NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
423 
424 	return 0;
425 fail:
426 	bnxt_qplib_disable_nq(nq);
427 	return rc;
428 }
429 
430 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
431 {
432 	if (nq->hwq.max_elements) {
433 		bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
434 		nq->hwq.max_elements = 0;
435 	}
436 }
437 
438 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
439 {
440 	nq->pdev = pdev;
441 	if (!nq->hwq.max_elements ||
442 	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
443 		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
444 
445 	if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
446 				      &nq->hwq.max_elements,
447 				      BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
448 				      PAGE_SIZE, HWQ_TYPE_L2_CMPL))
449 		return -ENOMEM;
450 
451 	nq->budget = 8;
452 	return 0;
453 }
454 
455 /* SRQ */
456 static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
457 {
458 	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
459 	struct dbr_dbr db_msg = { 0 };
460 	void __iomem *db;
461 	u32 sw_prod = 0;
462 
463 	/* Ring DB */
464 	sw_prod = (arm_type == DBR_DBR_TYPE_SRQ_ARM) ? srq->threshold :
465 		   HWQ_CMP(srq_hwq->prod, srq_hwq);
466 	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
467 				   DBR_DBR_INDEX_MASK);
468 	db_msg.type_xid = cpu_to_le32(((srq->id << DBR_DBR_XID_SFT) &
469 					DBR_DBR_XID_MASK) | arm_type);
470 	db = (arm_type == DBR_DBR_TYPE_SRQ_ARMENA) ?
471 		srq->dbr_base : srq->dpi->dbr;
472 	wmb(); /* barrier before db ring */
473 	__iowrite64_copy(db, &db_msg, sizeof(db_msg) / sizeof(u64));
474 }
475 
476 int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
477 			   struct bnxt_qplib_srq *srq)
478 {
479 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
480 	struct cmdq_destroy_srq req;
481 	struct creq_destroy_srq_resp resp;
482 	u16 cmd_flags = 0;
483 	int rc;
484 
485 	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);
486 
487 	/* Configure the request */
488 	req.srq_cid = cpu_to_le32(srq->id);
489 
490 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
491 					  (void *)&resp, NULL, 0);
492 	if (rc)
493 		return rc;
494 
495 	bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
496 	kfree(srq->swq);
497 	return 0;
498 }
499 
500 int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
501 			  struct bnxt_qplib_srq *srq)
502 {
503 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
504 	struct cmdq_create_srq req;
505 	struct creq_create_srq_resp resp;
506 	struct bnxt_qplib_pbl *pbl;
507 	u16 cmd_flags = 0;
508 	int rc, idx;
509 
510 	srq->hwq.max_elements = srq->max_wqe;
511 	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, srq->sglist,
512 				       srq->nmap, &srq->hwq.max_elements,
513 				       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
514 				       PAGE_SIZE, HWQ_TYPE_QUEUE);
515 	if (rc)
516 		goto exit;
517 
518 	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
519 			   GFP_KERNEL);
520 	if (!srq->swq) {
521 		rc = -ENOMEM;
522 		goto fail;
523 	}
524 
525 	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);
526 
527 	/* Configure the request */
528 	req.dpi = cpu_to_le32(srq->dpi->dpi);
529 	req.srq_handle = cpu_to_le64(srq);
530 
531 	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
532 	pbl = &srq->hwq.pbl[PBL_LVL_0];
533 	req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
534 				      CMDQ_CREATE_SRQ_LVL_MASK) <<
535 				      CMDQ_CREATE_SRQ_LVL_SFT) |
536 				      (pbl->pg_size == ROCE_PG_SIZE_4K ?
537 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
538 				       pbl->pg_size == ROCE_PG_SIZE_8K ?
539 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
540 				       pbl->pg_size == ROCE_PG_SIZE_64K ?
541 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
542 				       pbl->pg_size == ROCE_PG_SIZE_2M ?
543 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
544 				       pbl->pg_size == ROCE_PG_SIZE_8M ?
545 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
546 				       pbl->pg_size == ROCE_PG_SIZE_1G ?
547 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
548 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
549 	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
550 	req.pd_id = cpu_to_le32(srq->pd->id);
551 	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
552 
553 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
554 					  (void *)&resp, NULL, 0);
555 	if (rc)
556 		goto fail;
557 
558 	spin_lock_init(&srq->lock);
559 	srq->start_idx = 0;
560 	srq->last_idx = srq->hwq.max_elements - 1;
561 	for (idx = 0; idx < srq->hwq.max_elements; idx++)
562 		srq->swq[idx].next_idx = idx + 1;
563 	srq->swq[srq->last_idx].next_idx = -1;
564 
565 	srq->id = le32_to_cpu(resp.xid);
566 	srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
567 	if (srq->threshold)
568 		bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARMENA);
569 	srq->arm_req = false;
570 
571 	return 0;
572 fail:
573 	bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
574 	kfree(srq->swq);
575 exit:
576 	return rc;
577 }
578 
579 int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
580 			  struct bnxt_qplib_srq *srq)
581 {
582 	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
583 	u32 sw_prod, sw_cons, count = 0;
584 
585 	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
586 	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
587 
588 	count = sw_prod > sw_cons ? sw_prod - sw_cons :
589 				    srq_hwq->max_elements - sw_cons + sw_prod;
590 	if (count > srq->threshold) {
591 		srq->arm_req = false;
592 		bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
593 	} else {
594 		/* Deferred arming */
595 		srq->arm_req = true;
596 	}
597 
598 	return 0;
599 }
600 
601 int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
602 			 struct bnxt_qplib_srq *srq)
603 {
604 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
605 	struct cmdq_query_srq req;
606 	struct creq_query_srq_resp resp;
607 	struct bnxt_qplib_rcfw_sbuf *sbuf;
608 	struct creq_query_srq_resp_sb *sb;
609 	u16 cmd_flags = 0;
610 	int rc = 0;
611 
612 	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
613 	req.srq_cid = cpu_to_le32(srq->id);
614 
615 	/* Configure the request */
616 	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
617 	if (!sbuf)
618 		return -ENOMEM;
619 	sb = sbuf->sb;
620 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
621 					  (void *)sbuf, 0);
622 	srq->threshold = le16_to_cpu(sb->srq_limit);
623 	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
624 
625 	return rc;
626 }
627 
628 int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
629 			     struct bnxt_qplib_swqe *wqe)
630 {
631 	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
632 	struct rq_wqe *srqe, **srqe_ptr;
633 	struct sq_sge *hw_sge;
634 	u32 sw_prod, sw_cons, count = 0;
635 	int i, rc = 0, next;
636 
637 	spin_lock(&srq_hwq->lock);
638 	if (srq->start_idx == srq->last_idx) {
639 		dev_err(&srq_hwq->pdev->dev, "QPLIB: FP: SRQ (0x%x) is full!",
640 			srq->id);
641 		rc = -EINVAL;
642 		spin_unlock(&srq_hwq->lock);
643 		goto done;
644 	}
645 	next = srq->start_idx;
646 	srq->start_idx = srq->swq[next].next_idx;
647 	spin_unlock(&srq_hwq->lock);
648 
649 	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
650 	srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
651 	srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
652 	memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
653 	/* Calculate wqe_size16 and data_len */
654 	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
655 	     i < wqe->num_sge; i++, hw_sge++) {
656 		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
657 		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
658 		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
659 	}
660 	srqe->wqe_type = wqe->type;
661 	srqe->flags = wqe->flags;
662 	srqe->wqe_size = wqe->num_sge +
663 			((offsetof(typeof(*srqe), data) + 15) >> 4);
664 	srqe->wr_id[0] = cpu_to_le32((u32)next);
665 	srq->swq[next].wr_id = wqe->wr_id;
666 
667 	srq_hwq->prod++;
668 
669 	spin_lock(&srq_hwq->lock);
670 	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
671 	/* retaining srq_hwq->cons for this logic
672 	 * actually the lock is only required to
673 	 * read srq_hwq->cons.
674 	 */
675 	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
676 	count = sw_prod > sw_cons ? sw_prod - sw_cons :
677 				    srq_hwq->max_elements - sw_cons + sw_prod;
678 	spin_unlock(&srq_hwq->lock);
679 	/* Ring DB */
680 	bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ);
681 	if (srq->arm_req == true && count > srq->threshold) {
682 		srq->arm_req = false;
683 		bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
684 	}
685 done:
686 	return rc;
687 }
688 
689 /* QP */
690 int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
691 {
692 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
693 	struct cmdq_create_qp1 req;
694 	struct creq_create_qp1_resp resp;
695 	struct bnxt_qplib_pbl *pbl;
696 	struct bnxt_qplib_q *sq = &qp->sq;
697 	struct bnxt_qplib_q *rq = &qp->rq;
698 	int rc;
699 	u16 cmd_flags = 0;
700 	u32 qp_flags = 0;
701 
702 	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
703 
704 	/* General */
705 	req.type = qp->type;
706 	req.dpi = cpu_to_le32(qp->dpi->dpi);
707 	req.qp_handle = cpu_to_le64(qp->qp_handle);
708 
709 	/* SQ */
710 	sq->hwq.max_elements = sq->max_wqe;
711 	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
712 				       &sq->hwq.max_elements,
713 				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
714 				       PAGE_SIZE, HWQ_TYPE_QUEUE);
715 	if (rc)
716 		goto exit;
717 
718 	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
719 	if (!sq->swq) {
720 		rc = -ENOMEM;
721 		goto fail_sq;
722 	}
723 	pbl = &sq->hwq.pbl[PBL_LVL_0];
724 	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
725 	req.sq_pg_size_sq_lvl =
726 		((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
727 				<<  CMDQ_CREATE_QP1_SQ_LVL_SFT) |
728 		(pbl->pg_size == ROCE_PG_SIZE_4K ?
729 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
730 		 pbl->pg_size == ROCE_PG_SIZE_8K ?
731 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
732 		 pbl->pg_size == ROCE_PG_SIZE_64K ?
733 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
734 		 pbl->pg_size == ROCE_PG_SIZE_2M ?
735 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
736 		 pbl->pg_size == ROCE_PG_SIZE_8M ?
737 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
738 		 pbl->pg_size == ROCE_PG_SIZE_1G ?
739 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
740 		 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);
741 
742 	if (qp->scq)
743 		req.scq_cid = cpu_to_le32(qp->scq->id);
744 
745 	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
746 
747 	/* RQ */
748 	if (rq->max_wqe) {
749 		rq->hwq.max_elements = qp->rq.max_wqe;
750 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
751 					       &rq->hwq.max_elements,
752 					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
753 					       PAGE_SIZE, HWQ_TYPE_QUEUE);
754 		if (rc)
755 			goto fail_sq;
756 
757 		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
758 				  GFP_KERNEL);
759 		if (!rq->swq) {
760 			rc = -ENOMEM;
761 			goto fail_rq;
762 		}
763 		pbl = &rq->hwq.pbl[PBL_LVL_0];
764 		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
765 		req.rq_pg_size_rq_lvl =
766 			((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
767 			 CMDQ_CREATE_QP1_RQ_LVL_SFT) |
768 				(pbl->pg_size == ROCE_PG_SIZE_4K ?
769 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
770 				 pbl->pg_size == ROCE_PG_SIZE_8K ?
771 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
772 				 pbl->pg_size == ROCE_PG_SIZE_64K ?
773 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
774 				 pbl->pg_size == ROCE_PG_SIZE_2M ?
775 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
776 				 pbl->pg_size == ROCE_PG_SIZE_8M ?
777 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
778 				 pbl->pg_size == ROCE_PG_SIZE_1G ?
779 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
780 				 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
781 		if (qp->rcq)
782 			req.rcq_cid = cpu_to_le32(qp->rcq->id);
783 	}
784 
785 	/* Header buffer - allow hdr_buf pass in */
786 	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
787 	if (rc) {
788 		rc = -ENOMEM;
789 		goto fail;
790 	}
791 	req.qp_flags = cpu_to_le32(qp_flags);
792 	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
793 	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
794 
795 	req.sq_fwo_sq_sge =
796 		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
797 			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
798 	req.rq_fwo_rq_sge =
799 		cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
800 			    CMDQ_CREATE_QP1_RQ_SGE_SFT);
801 
802 	req.pd_id = cpu_to_le32(qp->pd->id);
803 
804 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
805 					  (void *)&resp, NULL, 0);
806 	if (rc)
807 		goto fail;
808 
809 	qp->id = le32_to_cpu(resp.xid);
810 	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
811 	rcfw->qp_tbl[qp->id].qp_id = qp->id;
812 	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
813 
814 	return 0;
815 
816 fail:
817 	bnxt_qplib_free_qp_hdr_buf(res, qp);
818 fail_rq:
819 	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
820 	kfree(rq->swq);
821 fail_sq:
822 	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
823 	kfree(sq->swq);
824 exit:
825 	return rc;
826 }
827 
828 int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
829 {
830 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
831 	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
832 	struct cmdq_create_qp req;
833 	struct creq_create_qp_resp resp;
834 	struct bnxt_qplib_pbl *pbl;
835 	struct sq_psn_search **psn_search_ptr;
836 	unsigned long int psn_search, poff = 0;
837 	struct bnxt_qplib_q *sq = &qp->sq;
838 	struct bnxt_qplib_q *rq = &qp->rq;
839 	struct bnxt_qplib_hwq *xrrq;
840 	int i, rc, req_size, psn_sz;
841 	u16 cmd_flags = 0, max_ssge;
842 	u32 sw_prod, qp_flags = 0;
843 
844 	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
845 
846 	/* General */
847 	req.type = qp->type;
848 	req.dpi = cpu_to_le32(qp->dpi->dpi);
849 	req.qp_handle = cpu_to_le64(qp->qp_handle);
850 
851 	/* SQ */
852 	psn_sz = (qp->type == CMDQ_CREATE_QP_TYPE_RC) ?
853 		 sizeof(struct sq_psn_search) : 0;
854 	sq->hwq.max_elements = sq->max_wqe;
855 	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
856 				       sq->nmap, &sq->hwq.max_elements,
857 				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
858 				       psn_sz,
859 				       PAGE_SIZE, HWQ_TYPE_QUEUE);
860 	if (rc)
861 		goto exit;
862 
863 	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
864 	if (!sq->swq) {
865 		rc = -ENOMEM;
866 		goto fail_sq;
867 	}
868 	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
869 	if (psn_sz) {
870 		psn_search_ptr = (struct sq_psn_search **)
871 				  &hw_sq_send_ptr[get_sqe_pg
872 					(sq->hwq.max_elements)];
873 		psn_search = (unsigned long int)
874 			      &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
875 			      [get_sqe_idx(sq->hwq.max_elements)];
876 		if (psn_search & ~PAGE_MASK) {
877 			/* If the psn_search does not start on a page boundary,
878 			 * then calculate the offset
879 			 */
880 			poff = (psn_search & ~PAGE_MASK) /
881 				BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
882 		}
883 		for (i = 0; i < sq->hwq.max_elements; i++)
884 			sq->swq[i].psn_search =
885 				&psn_search_ptr[get_psne_pg(i + poff)]
886 					       [get_psne_idx(i + poff)];
887 	}
888 	pbl = &sq->hwq.pbl[PBL_LVL_0];
889 	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
890 	req.sq_pg_size_sq_lvl =
891 		((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
892 				 <<  CMDQ_CREATE_QP_SQ_LVL_SFT) |
893 		(pbl->pg_size == ROCE_PG_SIZE_4K ?
894 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
895 		 pbl->pg_size == ROCE_PG_SIZE_8K ?
896 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
897 		 pbl->pg_size == ROCE_PG_SIZE_64K ?
898 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
899 		 pbl->pg_size == ROCE_PG_SIZE_2M ?
900 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
901 		 pbl->pg_size == ROCE_PG_SIZE_8M ?
902 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
903 		 pbl->pg_size == ROCE_PG_SIZE_1G ?
904 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
905 		 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
906 
907 	/* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
908 	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
909 	for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
910 		hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
911 						[get_sqe_idx(sw_prod)];
912 		hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
913 	}
914 
915 	if (qp->scq)
916 		req.scq_cid = cpu_to_le32(qp->scq->id);
917 
918 	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
919 	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
920 	if (qp->sig_type)
921 		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
922 
923 	/* RQ */
924 	if (rq->max_wqe) {
925 		rq->hwq.max_elements = rq->max_wqe;
926 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
927 					       rq->nmap, &rq->hwq.max_elements,
928 					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
929 					       PAGE_SIZE, HWQ_TYPE_QUEUE);
930 		if (rc)
931 			goto fail_sq;
932 
933 		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
934 				  GFP_KERNEL);
935 		if (!rq->swq) {
936 			rc = -ENOMEM;
937 			goto fail_rq;
938 		}
939 		pbl = &rq->hwq.pbl[PBL_LVL_0];
940 		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
941 		req.rq_pg_size_rq_lvl =
942 			((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
943 			 CMDQ_CREATE_QP_RQ_LVL_SFT) |
944 				(pbl->pg_size == ROCE_PG_SIZE_4K ?
945 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
946 				 pbl->pg_size == ROCE_PG_SIZE_8K ?
947 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
948 				 pbl->pg_size == ROCE_PG_SIZE_64K ?
949 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
950 				 pbl->pg_size == ROCE_PG_SIZE_2M ?
951 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
952 				 pbl->pg_size == ROCE_PG_SIZE_8M ?
953 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
954 				 pbl->pg_size == ROCE_PG_SIZE_1G ?
955 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
956 				 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
957 	} else {
958 		/* SRQ */
959 		if (qp->srq) {
960 			qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
961 			req.srq_cid = cpu_to_le32(qp->srq->id);
962 		}
963 	}
964 
965 	if (qp->rcq)
966 		req.rcq_cid = cpu_to_le32(qp->rcq->id);
967 	req.qp_flags = cpu_to_le32(qp_flags);
968 	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
969 	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
970 	qp->sq_hdr_buf = NULL;
971 	qp->rq_hdr_buf = NULL;
972 
973 	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
974 	if (rc)
975 		goto fail_rq;
976 
977 	/* CTRL-22434: Irrespective of the requested SGE count on the SQ
978 	 * always create the QP with max send sges possible if the requested
979 	 * inline size is greater than 0.
980 	 */
981 	max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
982 	req.sq_fwo_sq_sge = cpu_to_le16(
983 				((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
984 				 << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
985 	req.rq_fwo_rq_sge = cpu_to_le16(
986 				((rq->max_sge & CMDQ_CREATE_QP_RQ_SGE_MASK)
987 				 << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
988 	/* ORRQ and IRRQ */
989 	if (psn_sz) {
990 		xrrq = &qp->orrq;
991 		xrrq->max_elements =
992 			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
993 		req_size = xrrq->max_elements *
994 			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
995 		req_size &= ~(PAGE_SIZE - 1);
996 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
997 					       &xrrq->max_elements,
998 					       BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
999 					       0, req_size, HWQ_TYPE_CTX);
1000 		if (rc)
1001 			goto fail_buf_free;
1002 		pbl = &xrrq->pbl[PBL_LVL_0];
1003 		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1004 
1005 		xrrq = &qp->irrq;
1006 		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
1007 						qp->max_dest_rd_atomic);
1008 		req_size = xrrq->max_elements *
1009 			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1010 		req_size &= ~(PAGE_SIZE - 1);
1011 
1012 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
1013 					       &xrrq->max_elements,
1014 					       BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
1015 					       0, req_size, HWQ_TYPE_CTX);
1016 		if (rc)
1017 			goto fail_orrq;
1018 
1019 		pbl = &xrrq->pbl[PBL_LVL_0];
1020 		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1021 	}
1022 	req.pd_id = cpu_to_le32(qp->pd->id);
1023 
1024 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1025 					  (void *)&resp, NULL, 0);
1026 	if (rc)
1027 		goto fail;
1028 
1029 	qp->id = le32_to_cpu(resp.xid);
1030 	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
1031 	INIT_LIST_HEAD(&qp->sq_flush);
1032 	INIT_LIST_HEAD(&qp->rq_flush);
1033 	rcfw->qp_tbl[qp->id].qp_id = qp->id;
1034 	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
1035 
1036 	return 0;
1037 
1038 fail:
1039 	if (qp->irrq.max_elements)
1040 		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1041 fail_orrq:
1042 	if (qp->orrq.max_elements)
1043 		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1044 fail_buf_free:
1045 	bnxt_qplib_free_qp_hdr_buf(res, qp);
1046 fail_rq:
1047 	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
1048 	kfree(rq->swq);
1049 fail_sq:
1050 	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
1051 	kfree(sq->swq);
1052 exit:
1053 	return rc;
1054 }
1055 
1056 static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
1057 {
1058 	switch (qp->state) {
1059 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1060 		/* INIT->RTR, configure the path_mtu to the default
1061 		 * 2048 if not being requested
1062 		 */
1063 		if (!(qp->modify_flags &
1064 		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
1065 			qp->modify_flags |=
1066 				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1067 			qp->path_mtu =
1068 				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1069 		}
1070 		qp->modify_flags &=
1071 			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1072 		/* Bono FW require the max_dest_rd_atomic to be >= 1 */
1073 		if (qp->max_dest_rd_atomic < 1)
1074 			qp->max_dest_rd_atomic = 1;
1075 		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
1076 		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
1077 		if (!(qp->modify_flags &
1078 		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
1079 			qp->modify_flags |=
1080 				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
1081 			qp->ah.sgid_index = 0;
1082 		}
1083 		break;
1084 	default:
1085 		break;
1086 	}
1087 }
1088 
1089 static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
1090 {
1091 	switch (qp->state) {
1092 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1093 		/* Bono FW requires the max_rd_atomic to be >= 1 */
1094 		if (qp->max_rd_atomic < 1)
1095 			qp->max_rd_atomic = 1;
1096 		/* Bono FW does not allow PKEY_INDEX,
1097 		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
1098 		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
1099 		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
1100 		 * modification
1101 		 */
1102 		qp->modify_flags &=
1103 			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
1104 			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1105 			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1106 			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1107 			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1108 			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1109 			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1110 			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
1111 			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
1112 			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
1113 			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
1114 			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
1115 		break;
1116 	default:
1117 		break;
1118 	}
1119 }
1120 
1121 static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
1122 {
1123 	switch (qp->cur_qp_state) {
1124 	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1125 		break;
1126 	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1127 		__modify_flags_from_init_state(qp);
1128 		break;
1129 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1130 		__modify_flags_from_rtr_state(qp);
1131 		break;
1132 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1133 		break;
1134 	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1135 		break;
1136 	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1137 		break;
1138 	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1139 		break;
1140 	default:
1141 		break;
1142 	}
1143 }
1144 
1145 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1146 {
1147 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1148 	struct cmdq_modify_qp req;
1149 	struct creq_modify_qp_resp resp;
1150 	u16 cmd_flags = 0, pkey;
1151 	u32 temp32[4];
1152 	u32 bmask;
1153 	int rc;
1154 
1155 	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
1156 
1157 	/* Filter out the qp_attr_mask based on the state->new transition */
1158 	__filter_modify_flags(qp);
1159 	bmask = qp->modify_flags;
1160 	req.modify_mask = cpu_to_le32(qp->modify_flags);
1161 	req.qp_cid = cpu_to_le32(qp->id);
1162 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
1163 		req.network_type_en_sqd_async_notify_new_state =
1164 				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
1165 				(qp->en_sqd_async_notify ?
1166 					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
1167 	}
1168 	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
1169 
1170 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
1171 		req.access = qp->access;
1172 
1173 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
1174 		if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
1175 					 qp->pkey_index, &pkey))
1176 			req.pkey = cpu_to_le16(pkey);
1177 	}
1178 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
1179 		req.qkey = cpu_to_le32(qp->qkey);
1180 
1181 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
1182 		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
1183 		req.dgid[0] = cpu_to_le32(temp32[0]);
1184 		req.dgid[1] = cpu_to_le32(temp32[1]);
1185 		req.dgid[2] = cpu_to_le32(temp32[2]);
1186 		req.dgid[3] = cpu_to_le32(temp32[3]);
1187 	}
1188 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
1189 		req.flow_label = cpu_to_le32(qp->ah.flow_label);
1190 
1191 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
1192 		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
1193 					     [qp->ah.sgid_index]);
1194 
1195 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
1196 		req.hop_limit = qp->ah.hop_limit;
1197 
1198 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
1199 		req.traffic_class = qp->ah.traffic_class;
1200 
1201 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
1202 		memcpy(req.dest_mac, qp->ah.dmac, 6);
1203 
1204 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
1205 		req.path_mtu = qp->path_mtu;
1206 
1207 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
1208 		req.timeout = qp->timeout;
1209 
1210 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
1211 		req.retry_cnt = qp->retry_cnt;
1212 
1213 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
1214 		req.rnr_retry = qp->rnr_retry;
1215 
1216 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
1217 		req.min_rnr_timer = qp->min_rnr_timer;
1218 
1219 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
1220 		req.rq_psn = cpu_to_le32(qp->rq.psn);
1221 
1222 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
1223 		req.sq_psn = cpu_to_le32(qp->sq.psn);
1224 
1225 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
1226 		req.max_rd_atomic =
1227 			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1228 
1229 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
1230 		req.max_dest_rd_atomic =
1231 			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1232 
1233 	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1234 	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1235 	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1236 	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1237 	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1238 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1239 		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1240 
1241 	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1242 
1243 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1244 					  (void *)&resp, NULL, 0);
1245 	if (rc)
1246 		return rc;
1247 	qp->cur_qp_state = qp->state;
1248 	return 0;
1249 }
1250 
1251 int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1252 {
1253 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1254 	struct cmdq_query_qp req;
1255 	struct creq_query_qp_resp resp;
1256 	struct bnxt_qplib_rcfw_sbuf *sbuf;
1257 	struct creq_query_qp_resp_sb *sb;
1258 	u16 cmd_flags = 0;
1259 	u32 temp32[4];
1260 	int i, rc = 0;
1261 
1262 	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
1263 
1264 	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
1265 	if (!sbuf)
1266 		return -ENOMEM;
1267 	sb = sbuf->sb;
1268 
1269 	req.qp_cid = cpu_to_le32(qp->id);
1270 	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
1271 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
1272 					  (void *)sbuf, 0);
1273 	if (rc)
1274 		goto bail;
1275 	/* Extract the context from the side buffer */
1276 	qp->state = sb->en_sqd_async_notify_state &
1277 			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1278 	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1279 				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
1280 				  true : false;
1281 	qp->access = sb->access;
1282 	qp->pkey_index = le16_to_cpu(sb->pkey);
1283 	qp->qkey = le32_to_cpu(sb->qkey);
1284 
1285 	temp32[0] = le32_to_cpu(sb->dgid[0]);
1286 	temp32[1] = le32_to_cpu(sb->dgid[1]);
1287 	temp32[2] = le32_to_cpu(sb->dgid[2]);
1288 	temp32[3] = le32_to_cpu(sb->dgid[3]);
1289 	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1290 
1291 	qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1292 
1293 	qp->ah.sgid_index = 0;
1294 	for (i = 0; i < res->sgid_tbl.max; i++) {
1295 		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1296 			qp->ah.sgid_index = i;
1297 			break;
1298 		}
1299 	}
1300 	if (i == res->sgid_tbl.max)
1301 		dev_warn(&res->pdev->dev, "QPLIB: SGID not found??");
1302 
1303 	qp->ah.hop_limit = sb->hop_limit;
1304 	qp->ah.traffic_class = sb->traffic_class;
1305 	memcpy(qp->ah.dmac, sb->dest_mac, 6);
1306 	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1307 				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
1308 				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1309 	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1310 				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
1311 				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
1312 	qp->timeout = sb->timeout;
1313 	qp->retry_cnt = sb->retry_cnt;
1314 	qp->rnr_retry = sb->rnr_retry;
1315 	qp->min_rnr_timer = sb->min_rnr_timer;
1316 	qp->rq.psn = le32_to_cpu(sb->rq_psn);
1317 	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1318 	qp->sq.psn = le32_to_cpu(sb->sq_psn);
1319 	qp->max_dest_rd_atomic =
1320 			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1321 	qp->sq.max_wqe = qp->sq.hwq.max_elements;
1322 	qp->rq.max_wqe = qp->rq.hwq.max_elements;
1323 	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1324 	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1325 	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1326 	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1327 	memcpy(qp->smac, sb->src_mac, 6);
1328 	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1329 bail:
1330 	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
1331 	return rc;
1332 }
1333 
1334 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1335 {
1336 	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1337 	struct cq_base *hw_cqe, **hw_cqe_ptr;
1338 	int i;
1339 
1340 	for (i = 0; i < cq_hwq->max_elements; i++) {
1341 		hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
1342 		hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
1343 		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
1344 			continue;
1345 		/*
1346 		 * The valid test of the entry must be done first before
1347 		 * reading any further.
1348 		 */
1349 		dma_rmb();
1350 		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1351 		case CQ_BASE_CQE_TYPE_REQ:
1352 		case CQ_BASE_CQE_TYPE_TERMINAL:
1353 		{
1354 			struct cq_req *cqe = (struct cq_req *)hw_cqe;
1355 
1356 			if (qp == le64_to_cpu(cqe->qp_handle))
1357 				cqe->qp_handle = 0;
1358 			break;
1359 		}
1360 		case CQ_BASE_CQE_TYPE_RES_RC:
1361 		case CQ_BASE_CQE_TYPE_RES_UD:
1362 		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1363 		{
1364 			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1365 
1366 			if (qp == le64_to_cpu(cqe->qp_handle))
1367 				cqe->qp_handle = 0;
1368 			break;
1369 		}
1370 		default:
1371 			break;
1372 		}
1373 	}
1374 }
1375 
1376 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1377 			  struct bnxt_qplib_qp *qp)
1378 {
1379 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1380 	struct cmdq_destroy_qp req;
1381 	struct creq_destroy_qp_resp resp;
1382 	u16 cmd_flags = 0;
1383 	int rc;
1384 
1385 	rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1386 	rcfw->qp_tbl[qp->id].qp_handle = NULL;
1387 
1388 	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
1389 
1390 	req.qp_cid = cpu_to_le32(qp->id);
1391 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1392 					  (void *)&resp, NULL, 0);
1393 	if (rc) {
1394 		rcfw->qp_tbl[qp->id].qp_id = qp->id;
1395 		rcfw->qp_tbl[qp->id].qp_handle = qp;
1396 		return rc;
1397 	}
1398 
1399 	return 0;
1400 }
1401 
1402 void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
1403 			    struct bnxt_qplib_qp *qp)
1404 {
1405 	bnxt_qplib_free_qp_hdr_buf(res, qp);
1406 	bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
1407 	kfree(qp->sq.swq);
1408 
1409 	bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
1410 	kfree(qp->rq.swq);
1411 
1412 	if (qp->irrq.max_elements)
1413 		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1414 	if (qp->orrq.max_elements)
1415 		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1416 
1417 }
1418 
1419 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1420 				struct bnxt_qplib_sge *sge)
1421 {
1422 	struct bnxt_qplib_q *sq = &qp->sq;
1423 	u32 sw_prod;
1424 
1425 	memset(sge, 0, sizeof(*sge));
1426 
1427 	if (qp->sq_hdr_buf) {
1428 		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1429 		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1430 					 sw_prod * qp->sq_hdr_buf_size);
1431 		sge->lkey = 0xFFFFFFFF;
1432 		sge->size = qp->sq_hdr_buf_size;
1433 		return qp->sq_hdr_buf + sw_prod * sge->size;
1434 	}
1435 	return NULL;
1436 }
1437 
1438 u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1439 {
1440 	struct bnxt_qplib_q *rq = &qp->rq;
1441 
1442 	return HWQ_CMP(rq->hwq.prod, &rq->hwq);
1443 }
1444 
1445 dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1446 {
1447 	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1448 }
1449 
1450 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1451 				struct bnxt_qplib_sge *sge)
1452 {
1453 	struct bnxt_qplib_q *rq = &qp->rq;
1454 	u32 sw_prod;
1455 
1456 	memset(sge, 0, sizeof(*sge));
1457 
1458 	if (qp->rq_hdr_buf) {
1459 		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1460 		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1461 					 sw_prod * qp->rq_hdr_buf_size);
1462 		sge->lkey = 0xFFFFFFFF;
1463 		sge->size = qp->rq_hdr_buf_size;
1464 		return qp->rq_hdr_buf + sw_prod * sge->size;
1465 	}
1466 	return NULL;
1467 }
1468 
1469 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1470 {
1471 	struct bnxt_qplib_q *sq = &qp->sq;
1472 	struct dbr_dbr db_msg = { 0 };
1473 	u32 sw_prod;
1474 
1475 	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1476 
1477 	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
1478 				   DBR_DBR_INDEX_MASK);
1479 	db_msg.type_xid =
1480 		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1481 			    DBR_DBR_TYPE_SQ);
1482 	/* Flush all the WQE writes to HW */
1483 	wmb();
1484 	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1485 }
1486 
1487 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1488 			 struct bnxt_qplib_swqe *wqe)
1489 {
1490 	struct bnxt_qplib_q *sq = &qp->sq;
1491 	struct bnxt_qplib_swq *swq;
1492 	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
1493 	struct sq_sge *hw_sge;
1494 	struct bnxt_qplib_nq_work *nq_work = NULL;
1495 	bool sch_handler = false;
1496 	u32 sw_prod;
1497 	u8 wqe_size16;
1498 	int i, rc = 0, data_len = 0, pkt_num = 0;
1499 	__le32 temp32;
1500 
1501 	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
1502 		if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1503 			sch_handler = true;
1504 			dev_dbg(&sq->hwq.pdev->dev,
1505 				"%s Error QP. Scheduling for poll_cq\n",
1506 				__func__);
1507 			goto queue_err;
1508 		}
1509 	}
1510 
1511 	if (bnxt_qplib_queue_full(sq)) {
1512 		dev_err(&sq->hwq.pdev->dev,
1513 			"QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
1514 			sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
1515 			sq->q_full_delta);
1516 		rc = -ENOMEM;
1517 		goto done;
1518 	}
1519 	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1520 	swq = &sq->swq[sw_prod];
1521 	swq->wr_id = wqe->wr_id;
1522 	swq->type = wqe->type;
1523 	swq->flags = wqe->flags;
1524 	if (qp->sig_type)
1525 		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1526 	swq->start_psn = sq->psn & BTH_PSN_MASK;
1527 
1528 	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
1529 	hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
1530 					[get_sqe_idx(sw_prod)];
1531 
1532 	memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
1533 
1534 	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1535 		/* Copy the inline data */
1536 		if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
1537 			dev_warn(&sq->hwq.pdev->dev,
1538 				 "QPLIB: Inline data length > 96 detected");
1539 			data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
1540 		} else {
1541 			data_len = wqe->inline_len;
1542 		}
1543 		memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
1544 		wqe_size16 = (data_len + 15) >> 4;
1545 	} else {
1546 		for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
1547 		     i < wqe->num_sge; i++, hw_sge++) {
1548 			hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1549 			hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1550 			hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1551 			data_len += wqe->sg_list[i].size;
1552 		}
1553 		/* Each SGE entry = 1 WQE size16 */
1554 		wqe_size16 = wqe->num_sge;
1555 		/* HW requires wqe size has room for atleast one SGE even if
1556 		 * none was supplied by ULP
1557 		 */
1558 		if (!wqe->num_sge)
1559 			wqe_size16++;
1560 	}
1561 
1562 	/* Specifics */
1563 	switch (wqe->type) {
1564 	case BNXT_QPLIB_SWQE_TYPE_SEND:
1565 		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1566 			/* Assemble info for Raw Ethertype QPs */
1567 			struct sq_send_raweth_qp1 *sqe =
1568 				(struct sq_send_raweth_qp1 *)hw_sq_send_hdr;
1569 
1570 			sqe->wqe_type = wqe->type;
1571 			sqe->flags = wqe->flags;
1572 			sqe->wqe_size = wqe_size16 +
1573 				((offsetof(typeof(*sqe), data) + 15) >> 4);
1574 			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1575 			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1576 			sqe->length = cpu_to_le32(data_len);
1577 			sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1578 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1579 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1580 
1581 			break;
1582 		}
1583 		/* fall thru */
1584 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1585 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1586 	{
1587 		struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;
1588 
1589 		sqe->wqe_type = wqe->type;
1590 		sqe->flags = wqe->flags;
1591 		sqe->wqe_size = wqe_size16 +
1592 				((offsetof(typeof(*sqe), data) + 15) >> 4);
1593 		sqe->inv_key_or_imm_data = cpu_to_le32(
1594 						wqe->send.inv_key);
1595 		if (qp->type == CMDQ_CREATE_QP_TYPE_UD) {
1596 			sqe->q_key = cpu_to_le32(wqe->send.q_key);
1597 			sqe->dst_qp = cpu_to_le32(
1598 					wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
1599 			sqe->length = cpu_to_le32(data_len);
1600 			sqe->avid = cpu_to_le32(wqe->send.avid &
1601 						SQ_SEND_AVID_MASK);
1602 			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1603 		} else {
1604 			sqe->length = cpu_to_le32(data_len);
1605 			sqe->dst_qp = 0;
1606 			sqe->avid = 0;
1607 			if (qp->mtu)
1608 				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1609 			if (!pkt_num)
1610 				pkt_num = 1;
1611 			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1612 		}
1613 		break;
1614 	}
1615 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1616 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1617 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1618 	{
1619 		struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;
1620 
1621 		sqe->wqe_type = wqe->type;
1622 		sqe->flags = wqe->flags;
1623 		sqe->wqe_size = wqe_size16 +
1624 				((offsetof(typeof(*sqe), data) + 15) >> 4);
1625 		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1626 		sqe->length = cpu_to_le32((u32)data_len);
1627 		sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1628 		sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1629 		if (qp->mtu)
1630 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1631 		if (!pkt_num)
1632 			pkt_num = 1;
1633 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1634 		break;
1635 	}
1636 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1637 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1638 	{
1639 		struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;
1640 
1641 		sqe->wqe_type = wqe->type;
1642 		sqe->flags = wqe->flags;
1643 		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1644 		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1645 		sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1646 		sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1647 		if (qp->mtu)
1648 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1649 		if (!pkt_num)
1650 			pkt_num = 1;
1651 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1652 		break;
1653 	}
1654 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1655 	{
1656 		struct sq_localinvalidate *sqe =
1657 				(struct sq_localinvalidate *)hw_sq_send_hdr;
1658 
1659 		sqe->wqe_type = wqe->type;
1660 		sqe->flags = wqe->flags;
1661 		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1662 
1663 		break;
1664 	}
1665 	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1666 	{
1667 		struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;
1668 
1669 		sqe->wqe_type = wqe->type;
1670 		sqe->flags = wqe->flags;
1671 		sqe->access_cntl = wqe->frmr.access_cntl |
1672 				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1673 		sqe->zero_based_page_size_log =
1674 			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1675 			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1676 			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1677 		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1678 		temp32 = cpu_to_le32(wqe->frmr.length);
1679 		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1680 		sqe->numlevels_pbl_page_size_log =
1681 			((wqe->frmr.pbl_pg_sz_log <<
1682 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1683 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1684 			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1685 					SQ_FR_PMR_NUMLEVELS_MASK);
1686 
1687 		for (i = 0; i < wqe->frmr.page_list_len; i++)
1688 			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1689 						wqe->frmr.page_list[i] |
1690 						PTU_PTE_VALID);
1691 		sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1692 		sqe->va = cpu_to_le64(wqe->frmr.va);
1693 
1694 		break;
1695 	}
1696 	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1697 	{
1698 		struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;
1699 
1700 		sqe->wqe_type = wqe->type;
1701 		sqe->flags = wqe->flags;
1702 		sqe->access_cntl = wqe->bind.access_cntl;
1703 		sqe->mw_type_zero_based = wqe->bind.mw_type |
1704 			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1705 		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1706 		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1707 		sqe->va = cpu_to_le64(wqe->bind.va);
1708 		temp32 = cpu_to_le32(wqe->bind.length);
1709 		memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
1710 		break;
1711 	}
1712 	default:
1713 		/* Bad wqe, return error */
1714 		rc = -EINVAL;
1715 		goto done;
1716 	}
1717 	swq->next_psn = sq->psn & BTH_PSN_MASK;
1718 	if (swq->psn_search) {
1719 		swq->psn_search->opcode_start_psn = cpu_to_le32(
1720 			((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1721 			 SQ_PSN_SEARCH_START_PSN_MASK) |
1722 			((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1723 			 SQ_PSN_SEARCH_OPCODE_MASK));
1724 		swq->psn_search->flags_next_psn = cpu_to_le32(
1725 			((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1726 			 SQ_PSN_SEARCH_NEXT_PSN_MASK));
1727 	}
1728 queue_err:
1729 	if (sch_handler) {
1730 		/* Store the ULP info in the software structures */
1731 		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1732 		swq = &sq->swq[sw_prod];
1733 		swq->wr_id = wqe->wr_id;
1734 		swq->type = wqe->type;
1735 		swq->flags = wqe->flags;
1736 		if (qp->sig_type)
1737 			swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1738 		swq->start_psn = sq->psn & BTH_PSN_MASK;
1739 	}
1740 	sq->hwq.prod++;
1741 	qp->wqe_cnt++;
1742 
1743 done:
1744 	if (sch_handler) {
1745 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1746 		if (nq_work) {
1747 			nq_work->cq = qp->scq;
1748 			nq_work->nq = qp->scq->nq;
1749 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1750 			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
1751 		} else {
1752 			dev_err(&sq->hwq.pdev->dev,
1753 				"QPLIB: FP: Failed to allocate SQ nq_work!");
1754 			rc = -ENOMEM;
1755 		}
1756 	}
1757 	return rc;
1758 }
1759 
1760 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
1761 {
1762 	struct bnxt_qplib_q *rq = &qp->rq;
1763 	struct dbr_dbr db_msg = { 0 };
1764 	u32 sw_prod;
1765 
1766 	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1767 	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
1768 				   DBR_DBR_INDEX_MASK);
1769 	db_msg.type_xid =
1770 		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1771 			    DBR_DBR_TYPE_RQ);
1772 
1773 	/* Flush the writes to HW Rx WQE before the ringing Rx DB */
1774 	wmb();
1775 	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1776 }
1777 
1778 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1779 			 struct bnxt_qplib_swqe *wqe)
1780 {
1781 	struct bnxt_qplib_q *rq = &qp->rq;
1782 	struct rq_wqe *rqe, **rqe_ptr;
1783 	struct sq_sge *hw_sge;
1784 	struct bnxt_qplib_nq_work *nq_work = NULL;
1785 	bool sch_handler = false;
1786 	u32 sw_prod;
1787 	int i, rc = 0;
1788 
1789 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1790 		sch_handler = true;
1791 		dev_dbg(&rq->hwq.pdev->dev,
1792 			"%s Error QP. Scheduling for poll_cq\n",
1793 			__func__);
1794 		goto queue_err;
1795 	}
1796 	if (bnxt_qplib_queue_full(rq)) {
1797 		dev_err(&rq->hwq.pdev->dev,
1798 			"QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
1799 		rc = -EINVAL;
1800 		goto done;
1801 	}
1802 	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1803 	rq->swq[sw_prod].wr_id = wqe->wr_id;
1804 
1805 	rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
1806 	rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
1807 
1808 	memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1809 
1810 	/* Calculate wqe_size16 and data_len */
1811 	for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
1812 	     i < wqe->num_sge; i++, hw_sge++) {
1813 		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1814 		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1815 		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1816 	}
1817 	rqe->wqe_type = wqe->type;
1818 	rqe->flags = wqe->flags;
1819 	rqe->wqe_size = wqe->num_sge +
1820 			((offsetof(typeof(*rqe), data) + 15) >> 4);
1821 	/* HW requires wqe size has room for atleast one SGE even if none
1822 	 * was supplied by ULP
1823 	 */
1824 	if (!wqe->num_sge)
1825 		rqe->wqe_size++;
1826 
1827 	/* Supply the rqe->wr_id index to the wr_id_tbl for now */
1828 	rqe->wr_id[0] = cpu_to_le32(sw_prod);
1829 
1830 queue_err:
1831 	if (sch_handler) {
1832 		/* Store the ULP info in the software structures */
1833 		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1834 		rq->swq[sw_prod].wr_id = wqe->wr_id;
1835 	}
1836 
1837 	rq->hwq.prod++;
1838 	if (sch_handler) {
1839 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1840 		if (nq_work) {
1841 			nq_work->cq = qp->rcq;
1842 			nq_work->nq = qp->rcq->nq;
1843 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1844 			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
1845 		} else {
1846 			dev_err(&rq->hwq.pdev->dev,
1847 				"QPLIB: FP: Failed to allocate RQ nq_work!");
1848 			rc = -ENOMEM;
1849 		}
1850 	}
1851 done:
1852 	return rc;
1853 }
1854 
1855 /* CQ */
1856 
1857 /* Spinlock must be held */
1858 static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
1859 {
1860 	struct dbr_dbr db_msg = { 0 };
1861 
1862 	db_msg.type_xid =
1863 		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1864 			    DBR_DBR_TYPE_CQ_ARMENA);
1865 	/* Flush memory writes before enabling the CQ */
1866 	wmb();
1867 	__iowrite64_copy(cq->dbr_base, &db_msg, sizeof(db_msg) / sizeof(u64));
1868 }
1869 
1870 static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
1871 {
1872 	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1873 	struct dbr_dbr db_msg = { 0 };
1874 	u32 sw_cons;
1875 
1876 	/* Ring DB */
1877 	sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
1878 	db_msg.index = cpu_to_le32((sw_cons << DBR_DBR_INDEX_SFT) &
1879 				    DBR_DBR_INDEX_MASK);
1880 	db_msg.type_xid =
1881 		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1882 			    arm_type);
1883 	/* flush memory writes before arming the CQ */
1884 	wmb();
1885 	__iowrite64_copy(cq->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1886 }
1887 
1888 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1889 {
1890 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1891 	struct cmdq_create_cq req;
1892 	struct creq_create_cq_resp resp;
1893 	struct bnxt_qplib_pbl *pbl;
1894 	u16 cmd_flags = 0;
1895 	int rc;
1896 
1897 	cq->hwq.max_elements = cq->max_wqe;
1898 	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
1899 				       cq->nmap, &cq->hwq.max_elements,
1900 				       BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
1901 				       PAGE_SIZE, HWQ_TYPE_QUEUE);
1902 	if (rc)
1903 		goto exit;
1904 
1905 	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
1906 
1907 	if (!cq->dpi) {
1908 		dev_err(&rcfw->pdev->dev,
1909 			"QPLIB: FP: CREATE_CQ failed due to NULL DPI");
1910 		return -EINVAL;
1911 	}
1912 	req.dpi = cpu_to_le32(cq->dpi->dpi);
1913 	req.cq_handle = cpu_to_le64(cq->cq_handle);
1914 
1915 	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
1916 	pbl = &cq->hwq.pbl[PBL_LVL_0];
1917 	req.pg_size_lvl = cpu_to_le32(
1918 	    ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
1919 						CMDQ_CREATE_CQ_LVL_SFT) |
1920 	    (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
1921 	     pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
1922 	     pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
1923 	     pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
1924 	     pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
1925 	     pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
1926 	     CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
1927 
1928 	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1929 
1930 	req.cq_fco_cnq_id = cpu_to_le32(
1931 			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
1932 			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
1933 
1934 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1935 					  (void *)&resp, NULL, 0);
1936 	if (rc)
1937 		goto fail;
1938 
1939 	cq->id = le32_to_cpu(resp.xid);
1940 	cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
1941 	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
1942 	init_waitqueue_head(&cq->waitq);
1943 	INIT_LIST_HEAD(&cq->sqf_head);
1944 	INIT_LIST_HEAD(&cq->rqf_head);
1945 	spin_lock_init(&cq->compl_lock);
1946 
1947 	bnxt_qplib_arm_cq_enable(cq);
1948 	return 0;
1949 
1950 fail:
1951 	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1952 exit:
1953 	return rc;
1954 }
1955 
1956 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1957 {
1958 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1959 	struct cmdq_destroy_cq req;
1960 	struct creq_destroy_cq_resp resp;
1961 	u16 cmd_flags = 0;
1962 	int rc;
1963 
1964 	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
1965 
1966 	req.cq_cid = cpu_to_le32(cq->id);
1967 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1968 					  (void *)&resp, NULL, 0);
1969 	if (rc)
1970 		return rc;
1971 	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1972 	return 0;
1973 }
1974 
1975 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
1976 		      struct bnxt_qplib_cqe **pcqe, int *budget)
1977 {
1978 	u32 sw_prod, sw_cons;
1979 	struct bnxt_qplib_cqe *cqe;
1980 	int rc = 0;
1981 
1982 	/* Now complete all outstanding SQEs with FLUSHED_ERR */
1983 	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1984 	cqe = *pcqe;
1985 	while (*budget) {
1986 		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
1987 		if (sw_cons == sw_prod) {
1988 			break;
1989 		}
1990 		/* Skip the FENCE WQE completions */
1991 		if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
1992 			bnxt_qplib_cancel_phantom_processing(qp);
1993 			goto skip_compl;
1994 		}
1995 		memset(cqe, 0, sizeof(*cqe));
1996 		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
1997 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
1998 		cqe->qp_handle = (u64)(unsigned long)qp;
1999 		cqe->wr_id = sq->swq[sw_cons].wr_id;
2000 		cqe->src_qp = qp->id;
2001 		cqe->type = sq->swq[sw_cons].type;
2002 		cqe++;
2003 		(*budget)--;
2004 skip_compl:
2005 		sq->hwq.cons++;
2006 	}
2007 	*pcqe = cqe;
2008 	if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
2009 		/* Out of budget */
2010 		rc = -EAGAIN;
2011 
2012 	return rc;
2013 }
2014 
2015 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2016 		      struct bnxt_qplib_cqe **pcqe, int *budget)
2017 {
2018 	struct bnxt_qplib_cqe *cqe;
2019 	u32 sw_prod, sw_cons;
2020 	int rc = 0;
2021 	int opcode = 0;
2022 
2023 	switch (qp->type) {
2024 	case CMDQ_CREATE_QP1_TYPE_GSI:
2025 		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2026 		break;
2027 	case CMDQ_CREATE_QP_TYPE_RC:
2028 		opcode = CQ_BASE_CQE_TYPE_RES_RC;
2029 		break;
2030 	case CMDQ_CREATE_QP_TYPE_UD:
2031 		opcode = CQ_BASE_CQE_TYPE_RES_UD;
2032 		break;
2033 	}
2034 
2035 	/* Flush the rest of the RQ */
2036 	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
2037 	cqe = *pcqe;
2038 	while (*budget) {
2039 		sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
2040 		if (sw_cons == sw_prod)
2041 			break;
2042 		memset(cqe, 0, sizeof(*cqe));
2043 		cqe->status =
2044 		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2045 		cqe->opcode = opcode;
2046 		cqe->qp_handle = (unsigned long)qp;
2047 		cqe->wr_id = rq->swq[sw_cons].wr_id;
2048 		cqe++;
2049 		(*budget)--;
2050 		rq->hwq.cons++;
2051 	}
2052 	*pcqe = cqe;
2053 	if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
2054 		/* Out of budget */
2055 		rc = -EAGAIN;
2056 
2057 	return rc;
2058 }
2059 
2060 void bnxt_qplib_mark_qp_error(void *qp_handle)
2061 {
2062 	struct bnxt_qplib_qp *qp = qp_handle;
2063 
2064 	if (!qp)
2065 		return;
2066 
2067 	/* Must block new posting of SQ and RQ */
2068 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2069 	bnxt_qplib_cancel_phantom_processing(qp);
2070 }
2071 
2072 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2073  *       CQE is track from sw_cq_cons to max_element but valid only if VALID=1
2074  */
2075 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2076 		     u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
2077 {
2078 	struct bnxt_qplib_q *sq = &qp->sq;
2079 	struct bnxt_qplib_swq *swq;
2080 	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
2081 	struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
2082 	struct cq_req *peek_req_hwcqe;
2083 	struct bnxt_qplib_qp *peek_qp;
2084 	struct bnxt_qplib_q *peek_sq;
2085 	int i, rc = 0;
2086 
2087 	/* Normal mode */
2088 	/* Check for the psn_search marking before completing */
2089 	swq = &sq->swq[sw_sq_cons];
2090 	if (swq->psn_search &&
2091 	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2092 		/* Unmark */
2093 		swq->psn_search->flags_next_psn = cpu_to_le32
2094 			(le32_to_cpu(swq->psn_search->flags_next_psn)
2095 				     & ~0x80000000);
2096 		dev_dbg(&cq->hwq.pdev->dev,
2097 			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2098 			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
2099 		sq->condition = true;
2100 		sq->send_phantom = true;
2101 
2102 		/* TODO: Only ARM if the previous SQE is ARMALL */
2103 		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
2104 
2105 		rc = -EAGAIN;
2106 		goto out;
2107 	}
2108 	if (sq->condition) {
2109 		/* Peek at the completions */
2110 		peek_raw_cq_cons = cq->hwq.cons;
2111 		peek_sw_cq_cons = cq_cons;
2112 		i = cq->hwq.max_elements;
2113 		while (i--) {
2114 			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
2115 			peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2116 			peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
2117 						     [CQE_IDX(peek_sw_cq_cons)];
2118 			/* If the next hwcqe is VALID */
2119 			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
2120 					  cq->hwq.max_elements)) {
2121 			/*
2122 			 * The valid test of the entry must be done first before
2123 			 * reading any further.
2124 			 */
2125 				dma_rmb();
2126 				/* If the next hwcqe is a REQ */
2127 				if ((peek_hwcqe->cqe_type_toggle &
2128 				    CQ_BASE_CQE_TYPE_MASK) ==
2129 				    CQ_BASE_CQE_TYPE_REQ) {
2130 					peek_req_hwcqe = (struct cq_req *)
2131 							 peek_hwcqe;
2132 					peek_qp = (struct bnxt_qplib_qp *)
2133 						((unsigned long)
2134 						 le64_to_cpu
2135 						 (peek_req_hwcqe->qp_handle));
2136 					peek_sq = &peek_qp->sq;
2137 					peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
2138 						peek_req_hwcqe->sq_cons_idx) - 1
2139 						, &sq->hwq);
2140 					/* If the hwcqe's sq's wr_id matches */
2141 					if (peek_sq == sq &&
2142 					    sq->swq[peek_sq_cons_idx].wr_id ==
2143 					    BNXT_QPLIB_FENCE_WRID) {
2144 						/*
2145 						 *  Unbreak only if the phantom
2146 						 *  comes back
2147 						 */
2148 						dev_dbg(&cq->hwq.pdev->dev,
2149 							"FP:Got Phantom CQE");
2150 						sq->condition = false;
2151 						sq->single = true;
2152 						rc = 0;
2153 						goto out;
2154 					}
2155 				}
2156 				/* Valid but not the phantom, so keep looping */
2157 			} else {
2158 				/* Not valid yet, just exit and wait */
2159 				rc = -EINVAL;
2160 				goto out;
2161 			}
2162 			peek_sw_cq_cons++;
2163 			peek_raw_cq_cons++;
2164 		}
2165 		dev_err(&cq->hwq.pdev->dev,
2166 			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
2167 			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
2168 		rc = -EINVAL;
2169 	}
2170 out:
2171 	return rc;
2172 }
2173 
2174 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2175 				     struct cq_req *hwcqe,
2176 				     struct bnxt_qplib_cqe **pcqe, int *budget,
2177 				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2178 {
2179 	struct bnxt_qplib_qp *qp;
2180 	struct bnxt_qplib_q *sq;
2181 	struct bnxt_qplib_cqe *cqe;
2182 	u32 sw_sq_cons, cqe_sq_cons;
2183 	struct bnxt_qplib_swq *swq;
2184 	int rc = 0;
2185 
2186 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2187 				      le64_to_cpu(hwcqe->qp_handle));
2188 	if (!qp) {
2189 		dev_err(&cq->hwq.pdev->dev,
2190 			"QPLIB: FP: Process Req qp is NULL");
2191 		return -EINVAL;
2192 	}
2193 	sq = &qp->sq;
2194 
2195 	cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
2196 	if (cqe_sq_cons > sq->hwq.max_elements) {
2197 		dev_err(&cq->hwq.pdev->dev,
2198 			"QPLIB: FP: CQ Process req reported ");
2199 		dev_err(&cq->hwq.pdev->dev,
2200 			"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
2201 			cqe_sq_cons, sq->hwq.max_elements);
2202 		return -EINVAL;
2203 	}
2204 
2205 	if (qp->sq.flushed) {
2206 		dev_dbg(&cq->hwq.pdev->dev,
2207 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2208 		goto done;
2209 	}
2210 	/* Require to walk the sq's swq to fabricate CQEs for all previously
2211 	 * signaled SWQEs due to CQE aggregation from the current sq cons
2212 	 * to the cqe_sq_cons
2213 	 */
2214 	cqe = *pcqe;
2215 	while (*budget) {
2216 		sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2217 		if (sw_sq_cons == cqe_sq_cons)
2218 			/* Done */
2219 			break;
2220 
2221 		swq = &sq->swq[sw_sq_cons];
2222 		memset(cqe, 0, sizeof(*cqe));
2223 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2224 		cqe->qp_handle = (u64)(unsigned long)qp;
2225 		cqe->src_qp = qp->id;
2226 		cqe->wr_id = swq->wr_id;
2227 		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2228 			goto skip;
2229 		cqe->type = swq->type;
2230 
2231 		/* For the last CQE, check for status.  For errors, regardless
2232 		 * of the request being signaled or not, it must complete with
2233 		 * the hwcqe error status
2234 		 */
2235 		if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
2236 		    hwcqe->status != CQ_REQ_STATUS_OK) {
2237 			cqe->status = hwcqe->status;
2238 			dev_err(&cq->hwq.pdev->dev,
2239 				"QPLIB: FP: CQ Processed Req ");
2240 			dev_err(&cq->hwq.pdev->dev,
2241 				"QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
2242 				sw_sq_cons, cqe->wr_id, cqe->status);
2243 			cqe++;
2244 			(*budget)--;
2245 			bnxt_qplib_mark_qp_error(qp);
2246 			/* Add qp to flush list of the CQ */
2247 			bnxt_qplib_add_flush_qp(qp);
2248 		} else {
2249 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2250 				/* Before we complete, do WA 9060 */
2251 				if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
2252 					      cqe_sq_cons)) {
2253 					*lib_qp = qp;
2254 					goto out;
2255 				}
2256 				cqe->status = CQ_REQ_STATUS_OK;
2257 				cqe++;
2258 				(*budget)--;
2259 			}
2260 		}
2261 skip:
2262 		sq->hwq.cons++;
2263 		if (sq->single)
2264 			break;
2265 	}
2266 out:
2267 	*pcqe = cqe;
2268 	if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
2269 		/* Out of budget */
2270 		rc = -EAGAIN;
2271 		goto done;
2272 	}
2273 	/*
2274 	 * Back to normal completion mode only after it has completed all of
2275 	 * the WC for this CQE
2276 	 */
2277 	sq->single = false;
2278 done:
2279 	return rc;
2280 }
2281 
2282 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2283 {
2284 	spin_lock(&srq->hwq.lock);
2285 	srq->swq[srq->last_idx].next_idx = (int)tag;
2286 	srq->last_idx = (int)tag;
2287 	srq->swq[srq->last_idx].next_idx = -1;
2288 	srq->hwq.cons++; /* Support for SRQE counter */
2289 	spin_unlock(&srq->hwq.lock);
2290 }
2291 
2292 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2293 					struct cq_res_rc *hwcqe,
2294 					struct bnxt_qplib_cqe **pcqe,
2295 					int *budget)
2296 {
2297 	struct bnxt_qplib_qp *qp;
2298 	struct bnxt_qplib_q *rq;
2299 	struct bnxt_qplib_srq *srq;
2300 	struct bnxt_qplib_cqe *cqe;
2301 	u32 wr_id_idx;
2302 	int rc = 0;
2303 
2304 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2305 				      le64_to_cpu(hwcqe->qp_handle));
2306 	if (!qp) {
2307 		dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
2308 		return -EINVAL;
2309 	}
2310 	if (qp->rq.flushed) {
2311 		dev_dbg(&cq->hwq.pdev->dev,
2312 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2313 		goto done;
2314 	}
2315 
2316 	cqe = *pcqe;
2317 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2318 	cqe->length = le32_to_cpu(hwcqe->length);
2319 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2320 	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2321 	cqe->flags = le16_to_cpu(hwcqe->flags);
2322 	cqe->status = hwcqe->status;
2323 	cqe->qp_handle = (u64)(unsigned long)qp;
2324 
2325 	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2326 				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2327 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2328 		srq = qp->srq;
2329 		if (!srq)
2330 			return -EINVAL;
2331 		if (wr_id_idx > srq->hwq.max_elements) {
2332 			dev_err(&cq->hwq.pdev->dev,
2333 				"QPLIB: FP: CQ Process RC ");
2334 			dev_err(&cq->hwq.pdev->dev,
2335 				"QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
2336 				wr_id_idx, srq->hwq.max_elements);
2337 			return -EINVAL;
2338 		}
2339 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2340 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2341 		cqe++;
2342 		(*budget)--;
2343 		*pcqe = cqe;
2344 	} else {
2345 		rq = &qp->rq;
2346 		if (wr_id_idx > rq->hwq.max_elements) {
2347 			dev_err(&cq->hwq.pdev->dev,
2348 				"QPLIB: FP: CQ Process RC ");
2349 			dev_err(&cq->hwq.pdev->dev,
2350 				"QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
2351 				wr_id_idx, rq->hwq.max_elements);
2352 			return -EINVAL;
2353 		}
2354 		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2355 		cqe++;
2356 		(*budget)--;
2357 		rq->hwq.cons++;
2358 		*pcqe = cqe;
2359 
2360 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2361 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2362 			/* Add qp to flush list of the CQ */
2363 			bnxt_qplib_add_flush_qp(qp);
2364 		}
2365 	}
2366 
2367 done:
2368 	return rc;
2369 }
2370 
2371 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2372 					struct cq_res_ud *hwcqe,
2373 					struct bnxt_qplib_cqe **pcqe,
2374 					int *budget)
2375 {
2376 	struct bnxt_qplib_qp *qp;
2377 	struct bnxt_qplib_q *rq;
2378 	struct bnxt_qplib_srq *srq;
2379 	struct bnxt_qplib_cqe *cqe;
2380 	u32 wr_id_idx;
2381 	int rc = 0;
2382 
2383 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2384 				      le64_to_cpu(hwcqe->qp_handle));
2385 	if (!qp) {
2386 		dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
2387 		return -EINVAL;
2388 	}
2389 	if (qp->rq.flushed) {
2390 		dev_dbg(&cq->hwq.pdev->dev,
2391 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2392 		goto done;
2393 	}
2394 	cqe = *pcqe;
2395 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2396 	cqe->length = le32_to_cpu(hwcqe->length);
2397 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2398 	cqe->flags = le16_to_cpu(hwcqe->flags);
2399 	cqe->status = hwcqe->status;
2400 	cqe->qp_handle = (u64)(unsigned long)qp;
2401 	memcpy(cqe->smac, hwcqe->src_mac, 6);
2402 	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2403 				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2404 	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2405 				  ((le32_to_cpu(
2406 				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
2407 				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2408 
2409 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2410 		srq = qp->srq;
2411 		if (!srq)
2412 			return -EINVAL;
2413 
2414 		if (wr_id_idx > srq->hwq.max_elements) {
2415 			dev_err(&cq->hwq.pdev->dev,
2416 				"QPLIB: FP: CQ Process UD ");
2417 			dev_err(&cq->hwq.pdev->dev,
2418 				"QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
2419 				wr_id_idx, srq->hwq.max_elements);
2420 			return -EINVAL;
2421 		}
2422 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2423 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2424 		cqe++;
2425 		(*budget)--;
2426 		*pcqe = cqe;
2427 	} else {
2428 		rq = &qp->rq;
2429 		if (wr_id_idx > rq->hwq.max_elements) {
2430 			dev_err(&cq->hwq.pdev->dev,
2431 				"QPLIB: FP: CQ Process UD ");
2432 			dev_err(&cq->hwq.pdev->dev,
2433 				"QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
2434 				wr_id_idx, rq->hwq.max_elements);
2435 			return -EINVAL;
2436 		}
2437 
2438 		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2439 		cqe++;
2440 		(*budget)--;
2441 		rq->hwq.cons++;
2442 		*pcqe = cqe;
2443 
2444 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2445 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2446 			/* Add qp to flush list of the CQ */
2447 			bnxt_qplib_add_flush_qp(qp);
2448 		}
2449 	}
2450 done:
2451 	return rc;
2452 }
2453 
2454 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2455 {
2456 	struct cq_base *hw_cqe, **hw_cqe_ptr;
2457 	u32 sw_cons, raw_cons;
2458 	bool rc = true;
2459 
2460 	raw_cons = cq->hwq.cons;
2461 	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2462 	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2463 	hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2464 
2465 	 /* Check for Valid bit. If the CQE is valid, return false */
2466 	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2467 	return rc;
2468 }
2469 
2470 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2471 						struct cq_res_raweth_qp1 *hwcqe,
2472 						struct bnxt_qplib_cqe **pcqe,
2473 						int *budget)
2474 {
2475 	struct bnxt_qplib_qp *qp;
2476 	struct bnxt_qplib_q *rq;
2477 	struct bnxt_qplib_srq *srq;
2478 	struct bnxt_qplib_cqe *cqe;
2479 	u32 wr_id_idx;
2480 	int rc = 0;
2481 
2482 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2483 				      le64_to_cpu(hwcqe->qp_handle));
2484 	if (!qp) {
2485 		dev_err(&cq->hwq.pdev->dev,
2486 			"QPLIB: process_cq Raw/QP1 qp is NULL");
2487 		return -EINVAL;
2488 	}
2489 	if (qp->rq.flushed) {
2490 		dev_dbg(&cq->hwq.pdev->dev,
2491 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2492 		goto done;
2493 	}
2494 	cqe = *pcqe;
2495 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2496 	cqe->flags = le16_to_cpu(hwcqe->flags);
2497 	cqe->qp_handle = (u64)(unsigned long)qp;
2498 
2499 	wr_id_idx =
2500 		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2501 				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2502 	cqe->src_qp = qp->id;
2503 	if (qp->id == 1 && !cqe->length) {
2504 		/* Add workaround for the length misdetection */
2505 		cqe->length = 296;
2506 	} else {
2507 		cqe->length = le16_to_cpu(hwcqe->length);
2508 	}
2509 	cqe->pkey_index = qp->pkey_index;
2510 	memcpy(cqe->smac, qp->smac, 6);
2511 
2512 	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2513 	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2514 	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2515 
2516 	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2517 		srq = qp->srq;
2518 		if (!srq) {
2519 			dev_err(&cq->hwq.pdev->dev,
2520 				"QPLIB: FP: SRQ used but not defined??");
2521 			return -EINVAL;
2522 		}
2523 		if (wr_id_idx > srq->hwq.max_elements) {
2524 			dev_err(&cq->hwq.pdev->dev,
2525 				"QPLIB: FP: CQ Process Raw/QP1 ");
2526 			dev_err(&cq->hwq.pdev->dev,
2527 				"QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
2528 				wr_id_idx, srq->hwq.max_elements);
2529 			return -EINVAL;
2530 		}
2531 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2532 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2533 		cqe++;
2534 		(*budget)--;
2535 		*pcqe = cqe;
2536 	} else {
2537 		rq = &qp->rq;
2538 		if (wr_id_idx > rq->hwq.max_elements) {
2539 			dev_err(&cq->hwq.pdev->dev,
2540 				"QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
2541 			dev_err(&cq->hwq.pdev->dev,
2542 				"QPLIB: ix 0x%x exceeded RQ max 0x%x",
2543 				wr_id_idx, rq->hwq.max_elements);
2544 			return -EINVAL;
2545 		}
2546 		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2547 		cqe++;
2548 		(*budget)--;
2549 		rq->hwq.cons++;
2550 		*pcqe = cqe;
2551 
2552 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2553 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2554 			/* Add qp to flush list of the CQ */
2555 			bnxt_qplib_add_flush_qp(qp);
2556 		}
2557 	}
2558 
2559 done:
2560 	return rc;
2561 }
2562 
2563 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2564 					  struct cq_terminal *hwcqe,
2565 					  struct bnxt_qplib_cqe **pcqe,
2566 					  int *budget)
2567 {
2568 	struct bnxt_qplib_qp *qp;
2569 	struct bnxt_qplib_q *sq, *rq;
2570 	struct bnxt_qplib_cqe *cqe;
2571 	u32 sw_cons = 0, cqe_cons;
2572 	int rc = 0;
2573 
2574 	/* Check the Status */
2575 	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2576 		dev_warn(&cq->hwq.pdev->dev,
2577 			 "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
2578 			 hwcqe->status);
2579 
2580 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2581 				      le64_to_cpu(hwcqe->qp_handle));
2582 	if (!qp) {
2583 		dev_err(&cq->hwq.pdev->dev,
2584 			"QPLIB: FP: CQ Process terminal qp is NULL");
2585 		return -EINVAL;
2586 	}
2587 
2588 	/* Must block new posting of SQ and RQ */
2589 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2590 
2591 	sq = &qp->sq;
2592 	rq = &qp->rq;
2593 
2594 	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2595 	if (cqe_cons == 0xFFFF)
2596 		goto do_rq;
2597 
2598 	if (cqe_cons > sq->hwq.max_elements) {
2599 		dev_err(&cq->hwq.pdev->dev,
2600 			"QPLIB: FP: CQ Process terminal reported ");
2601 		dev_err(&cq->hwq.pdev->dev,
2602 			"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
2603 			cqe_cons, sq->hwq.max_elements);
2604 		goto do_rq;
2605 	}
2606 
2607 	if (qp->sq.flushed) {
2608 		dev_dbg(&cq->hwq.pdev->dev,
2609 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2610 		goto sq_done;
2611 	}
2612 
2613 	/* Terminal CQE can also include aggregated successful CQEs prior.
2614 	 * So we must complete all CQEs from the current sq's cons to the
2615 	 * cq_cons with status OK
2616 	 */
2617 	cqe = *pcqe;
2618 	while (*budget) {
2619 		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2620 		if (sw_cons == cqe_cons)
2621 			break;
2622 		if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2623 			memset(cqe, 0, sizeof(*cqe));
2624 			cqe->status = CQ_REQ_STATUS_OK;
2625 			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2626 			cqe->qp_handle = (u64)(unsigned long)qp;
2627 			cqe->src_qp = qp->id;
2628 			cqe->wr_id = sq->swq[sw_cons].wr_id;
2629 			cqe->type = sq->swq[sw_cons].type;
2630 			cqe++;
2631 			(*budget)--;
2632 		}
2633 		sq->hwq.cons++;
2634 	}
2635 	*pcqe = cqe;
2636 	if (!(*budget) && sw_cons != cqe_cons) {
2637 		/* Out of budget */
2638 		rc = -EAGAIN;
2639 		goto sq_done;
2640 	}
2641 sq_done:
2642 	if (rc)
2643 		return rc;
2644 do_rq:
2645 	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2646 	if (cqe_cons == 0xFFFF) {
2647 		goto done;
2648 	} else if (cqe_cons > rq->hwq.max_elements) {
2649 		dev_err(&cq->hwq.pdev->dev,
2650 			"QPLIB: FP: CQ Processed terminal ");
2651 		dev_err(&cq->hwq.pdev->dev,
2652 			"QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x",
2653 			cqe_cons, rq->hwq.max_elements);
2654 		goto done;
2655 	}
2656 
2657 	if (qp->rq.flushed) {
2658 		dev_dbg(&cq->hwq.pdev->dev,
2659 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2660 		rc = 0;
2661 		goto done;
2662 	}
2663 
2664 	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2665 	 * from the current rq->cons to the rq->prod regardless what the
2666 	 * rq->cons the terminal CQE indicates
2667 	 */
2668 
2669 	/* Add qp to flush list of the CQ */
2670 	bnxt_qplib_add_flush_qp(qp);
2671 done:
2672 	return rc;
2673 }
2674 
2675 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2676 					struct cq_cutoff *hwcqe)
2677 {
2678 	/* Check the Status */
2679 	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2680 		dev_err(&cq->hwq.pdev->dev,
2681 			"QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
2682 			hwcqe->status);
2683 		return -EINVAL;
2684 	}
2685 	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2686 	wake_up_interruptible(&cq->waitq);
2687 
2688 	return 0;
2689 }
2690 
2691 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2692 				  struct bnxt_qplib_cqe *cqe,
2693 				  int num_cqes)
2694 {
2695 	struct bnxt_qplib_qp *qp = NULL;
2696 	u32 budget = num_cqes;
2697 	unsigned long flags;
2698 
2699 	spin_lock_irqsave(&cq->flush_lock, flags);
2700 	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2701 		dev_dbg(&cq->hwq.pdev->dev,
2702 			"QPLIB: FP: Flushing SQ QP= %p",
2703 			qp);
2704 		__flush_sq(&qp->sq, qp, &cqe, &budget);
2705 	}
2706 
2707 	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2708 		dev_dbg(&cq->hwq.pdev->dev,
2709 			"QPLIB: FP: Flushing RQ QP= %p",
2710 			qp);
2711 		__flush_rq(&qp->rq, qp, &cqe, &budget);
2712 	}
2713 	spin_unlock_irqrestore(&cq->flush_lock, flags);
2714 
2715 	return num_cqes - budget;
2716 }
2717 
2718 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2719 		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
2720 {
2721 	struct cq_base *hw_cqe, **hw_cqe_ptr;
2722 	u32 sw_cons, raw_cons;
2723 	int budget, rc = 0;
2724 
2725 	raw_cons = cq->hwq.cons;
2726 	budget = num_cqes;
2727 
2728 	while (budget) {
2729 		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2730 		hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2731 		hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2732 
2733 		/* Check for Valid bit */
2734 		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2735 			break;
2736 
2737 		/*
2738 		 * The valid test of the entry must be done first before
2739 		 * reading any further.
2740 		 */
2741 		dma_rmb();
2742 		/* From the device's respective CQE format to qplib_wc*/
2743 		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
2744 		case CQ_BASE_CQE_TYPE_REQ:
2745 			rc = bnxt_qplib_cq_process_req(cq,
2746 						       (struct cq_req *)hw_cqe,
2747 						       &cqe, &budget,
2748 						       sw_cons, lib_qp);
2749 			break;
2750 		case CQ_BASE_CQE_TYPE_RES_RC:
2751 			rc = bnxt_qplib_cq_process_res_rc(cq,
2752 							  (struct cq_res_rc *)
2753 							  hw_cqe, &cqe,
2754 							  &budget);
2755 			break;
2756 		case CQ_BASE_CQE_TYPE_RES_UD:
2757 			rc = bnxt_qplib_cq_process_res_ud
2758 					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
2759 					 &budget);
2760 			break;
2761 		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2762 			rc = bnxt_qplib_cq_process_res_raweth_qp1
2763 					(cq, (struct cq_res_raweth_qp1 *)
2764 					 hw_cqe, &cqe, &budget);
2765 			break;
2766 		case CQ_BASE_CQE_TYPE_TERMINAL:
2767 			rc = bnxt_qplib_cq_process_terminal
2768 					(cq, (struct cq_terminal *)hw_cqe,
2769 					 &cqe, &budget);
2770 			break;
2771 		case CQ_BASE_CQE_TYPE_CUT_OFF:
2772 			bnxt_qplib_cq_process_cutoff
2773 					(cq, (struct cq_cutoff *)hw_cqe);
2774 			/* Done processing this CQ */
2775 			goto exit;
2776 		default:
2777 			dev_err(&cq->hwq.pdev->dev,
2778 				"QPLIB: process_cq unknown type 0x%lx",
2779 				hw_cqe->cqe_type_toggle &
2780 				CQ_BASE_CQE_TYPE_MASK);
2781 			rc = -EINVAL;
2782 			break;
2783 		}
2784 		if (rc < 0) {
2785 			if (rc == -EAGAIN)
2786 				break;
2787 			/* Error while processing the CQE, just skip to the
2788 			 * next one
2789 			 */
2790 			dev_err(&cq->hwq.pdev->dev,
2791 				"QPLIB: process_cqe error rc = 0x%x", rc);
2792 		}
2793 		raw_cons++;
2794 	}
2795 	if (cq->hwq.cons != raw_cons) {
2796 		cq->hwq.cons = raw_cons;
2797 		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
2798 	}
2799 exit:
2800 	return num_cqes - budget;
2801 }
2802 
2803 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2804 {
2805 	if (arm_type)
2806 		bnxt_qplib_arm_cq(cq, arm_type);
2807 	/* Using cq->arm_state variable to track whether to issue cq handler */
2808 	atomic_set(&cq->arm_state, 1);
2809 }
2810 
2811 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
2812 {
2813 	flush_workqueue(qp->scq->nq->cqn_wq);
2814 	if (qp->scq != qp->rcq)
2815 		flush_workqueue(qp->rcq->nq->cqn_wq);
2816 }
2817