1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: Fast Path Operators
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/spinlock.h>
41 #include <linux/sched.h>
42 #include <linux/slab.h>
43 #include <linux/pci.h>
44 #include <linux/prefetch.h>
45 
46 #include "roce_hsi.h"
47 
48 #include "qplib_res.h"
49 #include "qplib_rcfw.h"
50 #include "qplib_sp.h"
51 #include "qplib_fp.h"
52 
53 static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
54 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
55 
56 static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
57 {
58 	qp->sq.condition = false;
59 	qp->sq.send_phantom = false;
60 	qp->sq.single = false;
61 }
62 
63 /* Flush list */
64 static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
65 {
66 	struct bnxt_qplib_cq *scq, *rcq;
67 
68 	scq = qp->scq;
69 	rcq = qp->rcq;
70 
71 	if (!qp->sq.flushed) {
72 		dev_dbg(&scq->hwq.pdev->dev,
73 			"QPLIB: FP: Adding to SQ Flush list = %p",
74 			qp);
75 		bnxt_qplib_cancel_phantom_processing(qp);
76 		list_add_tail(&qp->sq_flush, &scq->sqf_head);
77 		qp->sq.flushed = true;
78 	}
79 	if (!qp->srq) {
80 		if (!qp->rq.flushed) {
81 			dev_dbg(&rcq->hwq.pdev->dev,
82 				"QPLIB: FP: Adding to RQ Flush list = %p",
83 				qp);
84 			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
85 			qp->rq.flushed = true;
86 		}
87 	}
88 }
89 
90 void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
91 				 unsigned long *flags)
92 	__acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
93 {
94 	spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
95 	if (qp->scq == qp->rcq)
96 		__acquire(&qp->rcq->hwq.lock);
97 	else
98 		spin_lock(&qp->rcq->hwq.lock);
99 }
100 
101 void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
102 				 unsigned long *flags)
103 	__releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
104 {
105 	if (qp->scq == qp->rcq)
106 		__release(&qp->rcq->hwq.lock);
107 	else
108 		spin_unlock(&qp->rcq->hwq.lock);
109 	spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
110 }
111 
112 static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
113 						      struct bnxt_qplib_cq *cq)
114 {
115 	struct bnxt_qplib_cq *buddy_cq = NULL;
116 
117 	if (qp->scq == qp->rcq)
118 		buddy_cq = NULL;
119 	else if (qp->scq == cq)
120 		buddy_cq = qp->rcq;
121 	else
122 		buddy_cq = qp->scq;
123 	return buddy_cq;
124 }
125 
126 static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
127 				     struct bnxt_qplib_cq *cq)
128 	__acquires(&buddy_cq->hwq.lock)
129 {
130 	struct bnxt_qplib_cq *buddy_cq = NULL;
131 
132 	buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
133 	if (!buddy_cq)
134 		__acquire(&cq->hwq.lock);
135 	else
136 		spin_lock(&buddy_cq->hwq.lock);
137 }
138 
139 static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
140 				       struct bnxt_qplib_cq *cq)
141 	__releases(&buddy_cq->hwq.lock)
142 {
143 	struct bnxt_qplib_cq *buddy_cq = NULL;
144 
145 	buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
146 	if (!buddy_cq)
147 		__release(&cq->hwq.lock);
148 	else
149 		spin_unlock(&buddy_cq->hwq.lock);
150 }
151 
152 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
153 {
154 	unsigned long flags;
155 
156 	bnxt_qplib_acquire_cq_locks(qp, &flags);
157 	__bnxt_qplib_add_flush_qp(qp);
158 	bnxt_qplib_release_cq_locks(qp, &flags);
159 }
160 
161 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
162 {
163 	struct bnxt_qplib_cq *scq, *rcq;
164 
165 	scq = qp->scq;
166 	rcq = qp->rcq;
167 
168 	if (qp->sq.flushed) {
169 		qp->sq.flushed = false;
170 		list_del(&qp->sq_flush);
171 	}
172 	if (!qp->srq) {
173 		if (qp->rq.flushed) {
174 			qp->rq.flushed = false;
175 			list_del(&qp->rq_flush);
176 		}
177 	}
178 }
179 
180 void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
181 {
182 	unsigned long flags;
183 
184 	bnxt_qplib_acquire_cq_locks(qp, &flags);
185 	__clean_cq(qp->scq, (u64)(unsigned long)qp);
186 	qp->sq.hwq.prod = 0;
187 	qp->sq.hwq.cons = 0;
188 	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
189 	qp->rq.hwq.prod = 0;
190 	qp->rq.hwq.cons = 0;
191 
192 	__bnxt_qplib_del_flush_qp(qp);
193 	bnxt_qplib_release_cq_locks(qp, &flags);
194 }
195 
196 static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
197 {
198 	struct bnxt_qplib_nq_work *nq_work =
199 			container_of(work, struct bnxt_qplib_nq_work, work);
200 
201 	struct bnxt_qplib_cq *cq = nq_work->cq;
202 	struct bnxt_qplib_nq *nq = nq_work->nq;
203 
204 	if (cq && nq) {
205 		spin_lock_bh(&cq->compl_lock);
206 		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
207 			dev_dbg(&nq->pdev->dev,
208 				"%s:Trigger cq  = %p event nq = %p\n",
209 				__func__, cq, nq);
210 			nq->cqn_handler(nq, cq);
211 		}
212 		spin_unlock_bh(&cq->compl_lock);
213 	}
214 	kfree(nq_work);
215 }
216 
217 static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
218 				       struct bnxt_qplib_qp *qp)
219 {
220 	struct bnxt_qplib_q *rq = &qp->rq;
221 	struct bnxt_qplib_q *sq = &qp->sq;
222 
223 	if (qp->rq_hdr_buf)
224 		dma_free_coherent(&res->pdev->dev,
225 				  rq->hwq.max_elements * qp->rq_hdr_buf_size,
226 				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
227 	if (qp->sq_hdr_buf)
228 		dma_free_coherent(&res->pdev->dev,
229 				  sq->hwq.max_elements * qp->sq_hdr_buf_size,
230 				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
231 	qp->rq_hdr_buf = NULL;
232 	qp->sq_hdr_buf = NULL;
233 	qp->rq_hdr_buf_map = 0;
234 	qp->sq_hdr_buf_map = 0;
235 	qp->sq_hdr_buf_size = 0;
236 	qp->rq_hdr_buf_size = 0;
237 }
238 
239 static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
240 				       struct bnxt_qplib_qp *qp)
241 {
242 	struct bnxt_qplib_q *rq = &qp->rq;
243 	struct bnxt_qplib_q *sq = &qp->rq;
244 	int rc = 0;
245 
246 	if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
247 		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
248 					sq->hwq.max_elements *
249 					qp->sq_hdr_buf_size,
250 					&qp->sq_hdr_buf_map, GFP_KERNEL);
251 		if (!qp->sq_hdr_buf) {
252 			rc = -ENOMEM;
253 			dev_err(&res->pdev->dev,
254 				"QPLIB: Failed to create sq_hdr_buf");
255 			goto fail;
256 		}
257 	}
258 
259 	if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
260 		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
261 						    rq->hwq.max_elements *
262 						    qp->rq_hdr_buf_size,
263 						    &qp->rq_hdr_buf_map,
264 						    GFP_KERNEL);
265 		if (!qp->rq_hdr_buf) {
266 			rc = -ENOMEM;
267 			dev_err(&res->pdev->dev,
268 				"QPLIB: Failed to create rq_hdr_buf");
269 			goto fail;
270 		}
271 	}
272 	return 0;
273 
274 fail:
275 	bnxt_qplib_free_qp_hdr_buf(res, qp);
276 	return rc;
277 }
278 
279 static void bnxt_qplib_service_nq(unsigned long data)
280 {
281 	struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
282 	struct bnxt_qplib_hwq *hwq = &nq->hwq;
283 	struct nq_base *nqe, **nq_ptr;
284 	struct bnxt_qplib_cq *cq;
285 	int num_cqne_processed = 0;
286 	u32 sw_cons, raw_cons;
287 	u16 type;
288 	int budget = nq->budget;
289 	u64 q_handle;
290 
291 	/* Service the NQ until empty */
292 	raw_cons = hwq->cons;
293 	while (budget--) {
294 		sw_cons = HWQ_CMP(raw_cons, hwq);
295 		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
296 		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
297 		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
298 			break;
299 
300 		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
301 		switch (type) {
302 		case NQ_BASE_TYPE_CQ_NOTIFICATION:
303 		{
304 			struct nq_cn *nqcne = (struct nq_cn *)nqe;
305 
306 			q_handle = le32_to_cpu(nqcne->cq_handle_low);
307 			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
308 						     << 32;
309 			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
310 			bnxt_qplib_arm_cq_enable(cq);
311 			spin_lock_bh(&cq->compl_lock);
312 			atomic_set(&cq->arm_state, 0);
313 			if (!nq->cqn_handler(nq, (cq)))
314 				num_cqne_processed++;
315 			else
316 				dev_warn(&nq->pdev->dev,
317 					 "QPLIB: cqn - type 0x%x not handled",
318 					 type);
319 			spin_unlock_bh(&cq->compl_lock);
320 			break;
321 		}
322 		case NQ_BASE_TYPE_DBQ_EVENT:
323 			break;
324 		default:
325 			dev_warn(&nq->pdev->dev,
326 				 "QPLIB: nqe with type = 0x%x not handled",
327 				 type);
328 			break;
329 		}
330 		raw_cons++;
331 	}
332 	if (hwq->cons != raw_cons) {
333 		hwq->cons = raw_cons;
334 		NQ_DB_REARM(nq->bar_reg_iomem, hwq->cons, hwq->max_elements);
335 	}
336 }
337 
338 static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
339 {
340 	struct bnxt_qplib_nq *nq = dev_instance;
341 	struct bnxt_qplib_hwq *hwq = &nq->hwq;
342 	struct nq_base **nq_ptr;
343 	u32 sw_cons;
344 
345 	/* Prefetch the NQ element */
346 	sw_cons = HWQ_CMP(hwq->cons, hwq);
347 	nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
348 	prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
349 
350 	/* Fan out to CPU affinitized kthreads? */
351 	tasklet_schedule(&nq->worker);
352 
353 	return IRQ_HANDLED;
354 }
355 
356 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
357 {
358 	if (nq->cqn_wq) {
359 		destroy_workqueue(nq->cqn_wq);
360 		nq->cqn_wq = NULL;
361 	}
362 	/* Make sure the HW is stopped! */
363 	synchronize_irq(nq->vector);
364 	tasklet_disable(&nq->worker);
365 	tasklet_kill(&nq->worker);
366 
367 	if (nq->requested) {
368 		irq_set_affinity_hint(nq->vector, NULL);
369 		free_irq(nq->vector, nq);
370 		nq->requested = false;
371 	}
372 	if (nq->bar_reg_iomem)
373 		iounmap(nq->bar_reg_iomem);
374 	nq->bar_reg_iomem = NULL;
375 
376 	nq->cqn_handler = NULL;
377 	nq->srqn_handler = NULL;
378 	nq->vector = 0;
379 }
380 
381 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
382 			 int nq_idx, int msix_vector, int bar_reg_offset,
383 			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
384 					    struct bnxt_qplib_cq *),
385 			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
386 					     void *, u8 event))
387 {
388 	resource_size_t nq_base;
389 	int rc = -1;
390 
391 	nq->pdev = pdev;
392 	nq->vector = msix_vector;
393 
394 	nq->cqn_handler = cqn_handler;
395 
396 	nq->srqn_handler = srqn_handler;
397 
398 	tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
399 
400 	/* Have a task to schedule CQ notifiers in post send case */
401 	nq->cqn_wq  = create_singlethread_workqueue("bnxt_qplib_nq");
402 	if (!nq->cqn_wq)
403 		goto fail;
404 
405 	nq->requested = false;
406 	memset(nq->name, 0, 32);
407 	sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
408 	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
409 	if (rc) {
410 		dev_err(&nq->pdev->dev,
411 			"Failed to request IRQ for NQ: %#x", rc);
412 		bnxt_qplib_disable_nq(nq);
413 		goto fail;
414 	}
415 
416 	cpumask_clear(&nq->mask);
417 	cpumask_set_cpu(nq_idx, &nq->mask);
418 	rc = irq_set_affinity_hint(nq->vector, &nq->mask);
419 	if (rc) {
420 		dev_warn(&nq->pdev->dev,
421 			 "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
422 			 nq->vector, nq_idx);
423 	}
424 
425 	nq->requested = true;
426 	nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
427 	nq->bar_reg_off = bar_reg_offset;
428 	nq_base = pci_resource_start(pdev, nq->bar_reg);
429 	if (!nq_base) {
430 		rc = -ENOMEM;
431 		goto fail;
432 	}
433 	nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 4);
434 	if (!nq->bar_reg_iomem) {
435 		rc = -ENOMEM;
436 		goto fail;
437 	}
438 	NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
439 
440 	return 0;
441 fail:
442 	bnxt_qplib_disable_nq(nq);
443 	return rc;
444 }
445 
446 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
447 {
448 	if (nq->hwq.max_elements) {
449 		bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
450 		nq->hwq.max_elements = 0;
451 	}
452 }
453 
454 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
455 {
456 	nq->pdev = pdev;
457 	if (!nq->hwq.max_elements ||
458 	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
459 		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
460 
461 	if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
462 				      &nq->hwq.max_elements,
463 				      BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
464 				      PAGE_SIZE, HWQ_TYPE_L2_CMPL))
465 		return -ENOMEM;
466 
467 	nq->budget = 8;
468 	return 0;
469 }
470 
471 /* QP */
472 int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
473 {
474 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
475 	struct cmdq_create_qp1 req;
476 	struct creq_create_qp1_resp resp;
477 	struct bnxt_qplib_pbl *pbl;
478 	struct bnxt_qplib_q *sq = &qp->sq;
479 	struct bnxt_qplib_q *rq = &qp->rq;
480 	int rc;
481 	u16 cmd_flags = 0;
482 	u32 qp_flags = 0;
483 
484 	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
485 
486 	/* General */
487 	req.type = qp->type;
488 	req.dpi = cpu_to_le32(qp->dpi->dpi);
489 	req.qp_handle = cpu_to_le64(qp->qp_handle);
490 
491 	/* SQ */
492 	sq->hwq.max_elements = sq->max_wqe;
493 	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
494 				       &sq->hwq.max_elements,
495 				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
496 				       PAGE_SIZE, HWQ_TYPE_QUEUE);
497 	if (rc)
498 		goto exit;
499 
500 	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
501 	if (!sq->swq) {
502 		rc = -ENOMEM;
503 		goto fail_sq;
504 	}
505 	pbl = &sq->hwq.pbl[PBL_LVL_0];
506 	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
507 	req.sq_pg_size_sq_lvl =
508 		((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
509 				<<  CMDQ_CREATE_QP1_SQ_LVL_SFT) |
510 		(pbl->pg_size == ROCE_PG_SIZE_4K ?
511 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
512 		 pbl->pg_size == ROCE_PG_SIZE_8K ?
513 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
514 		 pbl->pg_size == ROCE_PG_SIZE_64K ?
515 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
516 		 pbl->pg_size == ROCE_PG_SIZE_2M ?
517 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
518 		 pbl->pg_size == ROCE_PG_SIZE_8M ?
519 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
520 		 pbl->pg_size == ROCE_PG_SIZE_1G ?
521 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
522 		 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);
523 
524 	if (qp->scq)
525 		req.scq_cid = cpu_to_le32(qp->scq->id);
526 
527 	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
528 
529 	/* RQ */
530 	if (rq->max_wqe) {
531 		rq->hwq.max_elements = qp->rq.max_wqe;
532 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
533 					       &rq->hwq.max_elements,
534 					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
535 					       PAGE_SIZE, HWQ_TYPE_QUEUE);
536 		if (rc)
537 			goto fail_sq;
538 
539 		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
540 				  GFP_KERNEL);
541 		if (!rq->swq) {
542 			rc = -ENOMEM;
543 			goto fail_rq;
544 		}
545 		pbl = &rq->hwq.pbl[PBL_LVL_0];
546 		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
547 		req.rq_pg_size_rq_lvl =
548 			((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
549 			 CMDQ_CREATE_QP1_RQ_LVL_SFT) |
550 				(pbl->pg_size == ROCE_PG_SIZE_4K ?
551 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
552 				 pbl->pg_size == ROCE_PG_SIZE_8K ?
553 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
554 				 pbl->pg_size == ROCE_PG_SIZE_64K ?
555 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
556 				 pbl->pg_size == ROCE_PG_SIZE_2M ?
557 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
558 				 pbl->pg_size == ROCE_PG_SIZE_8M ?
559 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
560 				 pbl->pg_size == ROCE_PG_SIZE_1G ?
561 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
562 				 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
563 		if (qp->rcq)
564 			req.rcq_cid = cpu_to_le32(qp->rcq->id);
565 	}
566 
567 	/* Header buffer - allow hdr_buf pass in */
568 	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
569 	if (rc) {
570 		rc = -ENOMEM;
571 		goto fail;
572 	}
573 	req.qp_flags = cpu_to_le32(qp_flags);
574 	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
575 	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
576 
577 	req.sq_fwo_sq_sge =
578 		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
579 			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
580 	req.rq_fwo_rq_sge =
581 		cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
582 			    CMDQ_CREATE_QP1_RQ_SGE_SFT);
583 
584 	req.pd_id = cpu_to_le32(qp->pd->id);
585 
586 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
587 					  (void *)&resp, NULL, 0);
588 	if (rc)
589 		goto fail;
590 
591 	qp->id = le32_to_cpu(resp.xid);
592 	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
593 	rcfw->qp_tbl[qp->id].qp_id = qp->id;
594 	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
595 
596 	return 0;
597 
598 fail:
599 	bnxt_qplib_free_qp_hdr_buf(res, qp);
600 fail_rq:
601 	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
602 	kfree(rq->swq);
603 fail_sq:
604 	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
605 	kfree(sq->swq);
606 exit:
607 	return rc;
608 }
609 
610 int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
611 {
612 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
613 	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
614 	struct cmdq_create_qp req;
615 	struct creq_create_qp_resp resp;
616 	struct bnxt_qplib_pbl *pbl;
617 	struct sq_psn_search **psn_search_ptr;
618 	unsigned long int psn_search, poff = 0;
619 	struct bnxt_qplib_q *sq = &qp->sq;
620 	struct bnxt_qplib_q *rq = &qp->rq;
621 	struct bnxt_qplib_hwq *xrrq;
622 	int i, rc, req_size, psn_sz;
623 	u16 cmd_flags = 0, max_ssge;
624 	u32 sw_prod, qp_flags = 0;
625 
626 	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
627 
628 	/* General */
629 	req.type = qp->type;
630 	req.dpi = cpu_to_le32(qp->dpi->dpi);
631 	req.qp_handle = cpu_to_le64(qp->qp_handle);
632 
633 	/* SQ */
634 	psn_sz = (qp->type == CMDQ_CREATE_QP_TYPE_RC) ?
635 		 sizeof(struct sq_psn_search) : 0;
636 	sq->hwq.max_elements = sq->max_wqe;
637 	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
638 				       sq->nmap, &sq->hwq.max_elements,
639 				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
640 				       psn_sz,
641 				       PAGE_SIZE, HWQ_TYPE_QUEUE);
642 	if (rc)
643 		goto exit;
644 
645 	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
646 	if (!sq->swq) {
647 		rc = -ENOMEM;
648 		goto fail_sq;
649 	}
650 	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
651 	if (psn_sz) {
652 		psn_search_ptr = (struct sq_psn_search **)
653 				  &hw_sq_send_ptr[get_sqe_pg
654 					(sq->hwq.max_elements)];
655 		psn_search = (unsigned long int)
656 			      &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
657 			      [get_sqe_idx(sq->hwq.max_elements)];
658 		if (psn_search & ~PAGE_MASK) {
659 			/* If the psn_search does not start on a page boundary,
660 			 * then calculate the offset
661 			 */
662 			poff = (psn_search & ~PAGE_MASK) /
663 				BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
664 		}
665 		for (i = 0; i < sq->hwq.max_elements; i++)
666 			sq->swq[i].psn_search =
667 				&psn_search_ptr[get_psne_pg(i + poff)]
668 					       [get_psne_idx(i + poff)];
669 	}
670 	pbl = &sq->hwq.pbl[PBL_LVL_0];
671 	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
672 	req.sq_pg_size_sq_lvl =
673 		((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
674 				 <<  CMDQ_CREATE_QP_SQ_LVL_SFT) |
675 		(pbl->pg_size == ROCE_PG_SIZE_4K ?
676 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
677 		 pbl->pg_size == ROCE_PG_SIZE_8K ?
678 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
679 		 pbl->pg_size == ROCE_PG_SIZE_64K ?
680 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
681 		 pbl->pg_size == ROCE_PG_SIZE_2M ?
682 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
683 		 pbl->pg_size == ROCE_PG_SIZE_8M ?
684 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
685 		 pbl->pg_size == ROCE_PG_SIZE_1G ?
686 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
687 		 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
688 
689 	/* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
690 	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
691 	for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
692 		hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
693 						[get_sqe_idx(sw_prod)];
694 		hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
695 	}
696 
697 	if (qp->scq)
698 		req.scq_cid = cpu_to_le32(qp->scq->id);
699 
700 	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
701 	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
702 	if (qp->sig_type)
703 		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
704 
705 	/* RQ */
706 	if (rq->max_wqe) {
707 		rq->hwq.max_elements = rq->max_wqe;
708 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
709 					       rq->nmap, &rq->hwq.max_elements,
710 					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
711 					       PAGE_SIZE, HWQ_TYPE_QUEUE);
712 		if (rc)
713 			goto fail_sq;
714 
715 		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
716 				  GFP_KERNEL);
717 		if (!rq->swq) {
718 			rc = -ENOMEM;
719 			goto fail_rq;
720 		}
721 		pbl = &rq->hwq.pbl[PBL_LVL_0];
722 		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
723 		req.rq_pg_size_rq_lvl =
724 			((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
725 			 CMDQ_CREATE_QP_RQ_LVL_SFT) |
726 				(pbl->pg_size == ROCE_PG_SIZE_4K ?
727 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
728 				 pbl->pg_size == ROCE_PG_SIZE_8K ?
729 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
730 				 pbl->pg_size == ROCE_PG_SIZE_64K ?
731 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
732 				 pbl->pg_size == ROCE_PG_SIZE_2M ?
733 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
734 				 pbl->pg_size == ROCE_PG_SIZE_8M ?
735 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
736 				 pbl->pg_size == ROCE_PG_SIZE_1G ?
737 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
738 				 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
739 	}
740 
741 	if (qp->rcq)
742 		req.rcq_cid = cpu_to_le32(qp->rcq->id);
743 	req.qp_flags = cpu_to_le32(qp_flags);
744 	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
745 	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
746 	qp->sq_hdr_buf = NULL;
747 	qp->rq_hdr_buf = NULL;
748 
749 	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
750 	if (rc)
751 		goto fail_rq;
752 
753 	/* CTRL-22434: Irrespective of the requested SGE count on the SQ
754 	 * always create the QP with max send sges possible if the requested
755 	 * inline size is greater than 0.
756 	 */
757 	max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
758 	req.sq_fwo_sq_sge = cpu_to_le16(
759 				((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
760 				 << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
761 	req.rq_fwo_rq_sge = cpu_to_le16(
762 				((rq->max_sge & CMDQ_CREATE_QP_RQ_SGE_MASK)
763 				 << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
764 	/* ORRQ and IRRQ */
765 	if (psn_sz) {
766 		xrrq = &qp->orrq;
767 		xrrq->max_elements =
768 			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
769 		req_size = xrrq->max_elements *
770 			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
771 		req_size &= ~(PAGE_SIZE - 1);
772 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
773 					       &xrrq->max_elements,
774 					       BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
775 					       0, req_size, HWQ_TYPE_CTX);
776 		if (rc)
777 			goto fail_buf_free;
778 		pbl = &xrrq->pbl[PBL_LVL_0];
779 		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
780 
781 		xrrq = &qp->irrq;
782 		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
783 						qp->max_dest_rd_atomic);
784 		req_size = xrrq->max_elements *
785 			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
786 		req_size &= ~(PAGE_SIZE - 1);
787 
788 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
789 					       &xrrq->max_elements,
790 					       BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
791 					       0, req_size, HWQ_TYPE_CTX);
792 		if (rc)
793 			goto fail_orrq;
794 
795 		pbl = &xrrq->pbl[PBL_LVL_0];
796 		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
797 	}
798 	req.pd_id = cpu_to_le32(qp->pd->id);
799 
800 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
801 					  (void *)&resp, NULL, 0);
802 	if (rc)
803 		goto fail;
804 
805 	qp->id = le32_to_cpu(resp.xid);
806 	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
807 	INIT_LIST_HEAD(&qp->sq_flush);
808 	INIT_LIST_HEAD(&qp->rq_flush);
809 	rcfw->qp_tbl[qp->id].qp_id = qp->id;
810 	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
811 
812 	return 0;
813 
814 fail:
815 	if (qp->irrq.max_elements)
816 		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
817 fail_orrq:
818 	if (qp->orrq.max_elements)
819 		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
820 fail_buf_free:
821 	bnxt_qplib_free_qp_hdr_buf(res, qp);
822 fail_rq:
823 	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
824 	kfree(rq->swq);
825 fail_sq:
826 	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
827 	kfree(sq->swq);
828 exit:
829 	return rc;
830 }
831 
832 static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
833 {
834 	switch (qp->state) {
835 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
836 		/* INIT->RTR, configure the path_mtu to the default
837 		 * 2048 if not being requested
838 		 */
839 		if (!(qp->modify_flags &
840 		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
841 			qp->modify_flags |=
842 				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
843 			qp->path_mtu =
844 				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
845 		}
846 		qp->modify_flags &=
847 			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
848 		/* Bono FW require the max_dest_rd_atomic to be >= 1 */
849 		if (qp->max_dest_rd_atomic < 1)
850 			qp->max_dest_rd_atomic = 1;
851 		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
852 		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
853 		if (!(qp->modify_flags &
854 		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
855 			qp->modify_flags |=
856 				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
857 			qp->ah.sgid_index = 0;
858 		}
859 		break;
860 	default:
861 		break;
862 	}
863 }
864 
865 static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
866 {
867 	switch (qp->state) {
868 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
869 		/* Bono FW requires the max_rd_atomic to be >= 1 */
870 		if (qp->max_rd_atomic < 1)
871 			qp->max_rd_atomic = 1;
872 		/* Bono FW does not allow PKEY_INDEX,
873 		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
874 		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
875 		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
876 		 * modification
877 		 */
878 		qp->modify_flags &=
879 			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
880 			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
881 			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
882 			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
883 			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
884 			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
885 			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
886 			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
887 			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
888 			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
889 			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
890 			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
891 		break;
892 	default:
893 		break;
894 	}
895 }
896 
897 static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
898 {
899 	switch (qp->cur_qp_state) {
900 	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
901 		break;
902 	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
903 		__modify_flags_from_init_state(qp);
904 		break;
905 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
906 		__modify_flags_from_rtr_state(qp);
907 		break;
908 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
909 		break;
910 	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
911 		break;
912 	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
913 		break;
914 	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
915 		break;
916 	default:
917 		break;
918 	}
919 }
920 
921 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
922 {
923 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
924 	struct cmdq_modify_qp req;
925 	struct creq_modify_qp_resp resp;
926 	u16 cmd_flags = 0, pkey;
927 	u32 temp32[4];
928 	u32 bmask;
929 	int rc;
930 
931 	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
932 
933 	/* Filter out the qp_attr_mask based on the state->new transition */
934 	__filter_modify_flags(qp);
935 	bmask = qp->modify_flags;
936 	req.modify_mask = cpu_to_le32(qp->modify_flags);
937 	req.qp_cid = cpu_to_le32(qp->id);
938 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
939 		req.network_type_en_sqd_async_notify_new_state =
940 				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
941 				(qp->en_sqd_async_notify ?
942 					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
943 	}
944 	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
945 
946 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
947 		req.access = qp->access;
948 
949 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
950 		if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
951 					 qp->pkey_index, &pkey))
952 			req.pkey = cpu_to_le16(pkey);
953 	}
954 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
955 		req.qkey = cpu_to_le32(qp->qkey);
956 
957 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
958 		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
959 		req.dgid[0] = cpu_to_le32(temp32[0]);
960 		req.dgid[1] = cpu_to_le32(temp32[1]);
961 		req.dgid[2] = cpu_to_le32(temp32[2]);
962 		req.dgid[3] = cpu_to_le32(temp32[3]);
963 	}
964 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
965 		req.flow_label = cpu_to_le32(qp->ah.flow_label);
966 
967 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
968 		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
969 					     [qp->ah.sgid_index]);
970 
971 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
972 		req.hop_limit = qp->ah.hop_limit;
973 
974 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
975 		req.traffic_class = qp->ah.traffic_class;
976 
977 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
978 		memcpy(req.dest_mac, qp->ah.dmac, 6);
979 
980 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
981 		req.path_mtu = qp->path_mtu;
982 
983 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
984 		req.timeout = qp->timeout;
985 
986 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
987 		req.retry_cnt = qp->retry_cnt;
988 
989 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
990 		req.rnr_retry = qp->rnr_retry;
991 
992 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
993 		req.min_rnr_timer = qp->min_rnr_timer;
994 
995 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
996 		req.rq_psn = cpu_to_le32(qp->rq.psn);
997 
998 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
999 		req.sq_psn = cpu_to_le32(qp->sq.psn);
1000 
1001 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
1002 		req.max_rd_atomic =
1003 			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1004 
1005 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
1006 		req.max_dest_rd_atomic =
1007 			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1008 
1009 	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1010 	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1011 	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1012 	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1013 	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1014 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1015 		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1016 
1017 	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1018 
1019 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1020 					  (void *)&resp, NULL, 0);
1021 	if (rc)
1022 		return rc;
1023 	qp->cur_qp_state = qp->state;
1024 	return 0;
1025 }
1026 
1027 int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1028 {
1029 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1030 	struct cmdq_query_qp req;
1031 	struct creq_query_qp_resp resp;
1032 	struct bnxt_qplib_rcfw_sbuf *sbuf;
1033 	struct creq_query_qp_resp_sb *sb;
1034 	u16 cmd_flags = 0;
1035 	u32 temp32[4];
1036 	int i, rc = 0;
1037 
1038 	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
1039 
1040 	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
1041 	if (!sbuf)
1042 		return -ENOMEM;
1043 	sb = sbuf->sb;
1044 
1045 	req.qp_cid = cpu_to_le32(qp->id);
1046 	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
1047 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
1048 					  (void *)sbuf, 0);
1049 	if (rc)
1050 		goto bail;
1051 	/* Extract the context from the side buffer */
1052 	qp->state = sb->en_sqd_async_notify_state &
1053 			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1054 	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1055 				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
1056 				  true : false;
1057 	qp->access = sb->access;
1058 	qp->pkey_index = le16_to_cpu(sb->pkey);
1059 	qp->qkey = le32_to_cpu(sb->qkey);
1060 
1061 	temp32[0] = le32_to_cpu(sb->dgid[0]);
1062 	temp32[1] = le32_to_cpu(sb->dgid[1]);
1063 	temp32[2] = le32_to_cpu(sb->dgid[2]);
1064 	temp32[3] = le32_to_cpu(sb->dgid[3]);
1065 	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1066 
1067 	qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1068 
1069 	qp->ah.sgid_index = 0;
1070 	for (i = 0; i < res->sgid_tbl.max; i++) {
1071 		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1072 			qp->ah.sgid_index = i;
1073 			break;
1074 		}
1075 	}
1076 	if (i == res->sgid_tbl.max)
1077 		dev_warn(&res->pdev->dev, "QPLIB: SGID not found??");
1078 
1079 	qp->ah.hop_limit = sb->hop_limit;
1080 	qp->ah.traffic_class = sb->traffic_class;
1081 	memcpy(qp->ah.dmac, sb->dest_mac, 6);
1082 	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1083 				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
1084 				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1085 	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1086 				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
1087 				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
1088 	qp->timeout = sb->timeout;
1089 	qp->retry_cnt = sb->retry_cnt;
1090 	qp->rnr_retry = sb->rnr_retry;
1091 	qp->min_rnr_timer = sb->min_rnr_timer;
1092 	qp->rq.psn = le32_to_cpu(sb->rq_psn);
1093 	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1094 	qp->sq.psn = le32_to_cpu(sb->sq_psn);
1095 	qp->max_dest_rd_atomic =
1096 			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1097 	qp->sq.max_wqe = qp->sq.hwq.max_elements;
1098 	qp->rq.max_wqe = qp->rq.hwq.max_elements;
1099 	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1100 	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1101 	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1102 	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1103 	memcpy(qp->smac, sb->src_mac, 6);
1104 	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1105 bail:
1106 	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
1107 	return rc;
1108 }
1109 
1110 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1111 {
1112 	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1113 	struct cq_base *hw_cqe, **hw_cqe_ptr;
1114 	int i;
1115 
1116 	for (i = 0; i < cq_hwq->max_elements; i++) {
1117 		hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
1118 		hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
1119 		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
1120 			continue;
1121 		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1122 		case CQ_BASE_CQE_TYPE_REQ:
1123 		case CQ_BASE_CQE_TYPE_TERMINAL:
1124 		{
1125 			struct cq_req *cqe = (struct cq_req *)hw_cqe;
1126 
1127 			if (qp == le64_to_cpu(cqe->qp_handle))
1128 				cqe->qp_handle = 0;
1129 			break;
1130 		}
1131 		case CQ_BASE_CQE_TYPE_RES_RC:
1132 		case CQ_BASE_CQE_TYPE_RES_UD:
1133 		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1134 		{
1135 			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1136 
1137 			if (qp == le64_to_cpu(cqe->qp_handle))
1138 				cqe->qp_handle = 0;
1139 			break;
1140 		}
1141 		default:
1142 			break;
1143 		}
1144 	}
1145 }
1146 
1147 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1148 			  struct bnxt_qplib_qp *qp)
1149 {
1150 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1151 	struct cmdq_destroy_qp req;
1152 	struct creq_destroy_qp_resp resp;
1153 	unsigned long flags;
1154 	u16 cmd_flags = 0;
1155 	int rc;
1156 
1157 	rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1158 	rcfw->qp_tbl[qp->id].qp_handle = NULL;
1159 
1160 	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
1161 
1162 	req.qp_cid = cpu_to_le32(qp->id);
1163 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1164 					  (void *)&resp, NULL, 0);
1165 	if (rc) {
1166 		rcfw->qp_tbl[qp->id].qp_id = qp->id;
1167 		rcfw->qp_tbl[qp->id].qp_handle = qp;
1168 		return rc;
1169 	}
1170 
1171 	/* Must walk the associated CQs to nullified the QP ptr */
1172 	spin_lock_irqsave(&qp->scq->hwq.lock, flags);
1173 
1174 	__clean_cq(qp->scq, (u64)(unsigned long)qp);
1175 
1176 	if (qp->rcq && qp->rcq != qp->scq) {
1177 		spin_lock(&qp->rcq->hwq.lock);
1178 		__clean_cq(qp->rcq, (u64)(unsigned long)qp);
1179 		spin_unlock(&qp->rcq->hwq.lock);
1180 	}
1181 
1182 	spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);
1183 
1184 	bnxt_qplib_free_qp_hdr_buf(res, qp);
1185 	bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
1186 	kfree(qp->sq.swq);
1187 
1188 	bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
1189 	kfree(qp->rq.swq);
1190 
1191 	if (qp->irrq.max_elements)
1192 		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1193 	if (qp->orrq.max_elements)
1194 		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1195 
1196 	return 0;
1197 }
1198 
1199 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1200 				struct bnxt_qplib_sge *sge)
1201 {
1202 	struct bnxt_qplib_q *sq = &qp->sq;
1203 	u32 sw_prod;
1204 
1205 	memset(sge, 0, sizeof(*sge));
1206 
1207 	if (qp->sq_hdr_buf) {
1208 		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1209 		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1210 					 sw_prod * qp->sq_hdr_buf_size);
1211 		sge->lkey = 0xFFFFFFFF;
1212 		sge->size = qp->sq_hdr_buf_size;
1213 		return qp->sq_hdr_buf + sw_prod * sge->size;
1214 	}
1215 	return NULL;
1216 }
1217 
1218 u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1219 {
1220 	struct bnxt_qplib_q *rq = &qp->rq;
1221 
1222 	return HWQ_CMP(rq->hwq.prod, &rq->hwq);
1223 }
1224 
1225 dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1226 {
1227 	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1228 }
1229 
1230 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1231 				struct bnxt_qplib_sge *sge)
1232 {
1233 	struct bnxt_qplib_q *rq = &qp->rq;
1234 	u32 sw_prod;
1235 
1236 	memset(sge, 0, sizeof(*sge));
1237 
1238 	if (qp->rq_hdr_buf) {
1239 		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1240 		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1241 					 sw_prod * qp->rq_hdr_buf_size);
1242 		sge->lkey = 0xFFFFFFFF;
1243 		sge->size = qp->rq_hdr_buf_size;
1244 		return qp->rq_hdr_buf + sw_prod * sge->size;
1245 	}
1246 	return NULL;
1247 }
1248 
1249 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1250 {
1251 	struct bnxt_qplib_q *sq = &qp->sq;
1252 	struct dbr_dbr db_msg = { 0 };
1253 	u32 sw_prod;
1254 
1255 	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1256 
1257 	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
1258 				   DBR_DBR_INDEX_MASK);
1259 	db_msg.type_xid =
1260 		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1261 			    DBR_DBR_TYPE_SQ);
1262 	/* Flush all the WQE writes to HW */
1263 	wmb();
1264 	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1265 }
1266 
1267 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1268 			 struct bnxt_qplib_swqe *wqe)
1269 {
1270 	struct bnxt_qplib_q *sq = &qp->sq;
1271 	struct bnxt_qplib_swq *swq;
1272 	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
1273 	struct sq_sge *hw_sge;
1274 	struct bnxt_qplib_nq_work *nq_work = NULL;
1275 	bool sch_handler = false;
1276 	u32 sw_prod;
1277 	u8 wqe_size16;
1278 	int i, rc = 0, data_len = 0, pkt_num = 0;
1279 	__le32 temp32;
1280 
1281 	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
1282 		if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1283 			sch_handler = true;
1284 			dev_dbg(&sq->hwq.pdev->dev,
1285 				"%s Error QP. Scheduling for poll_cq\n",
1286 				__func__);
1287 			goto queue_err;
1288 		}
1289 	}
1290 
1291 	if (bnxt_qplib_queue_full(sq)) {
1292 		dev_err(&sq->hwq.pdev->dev,
1293 			"QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
1294 			sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
1295 			sq->q_full_delta);
1296 		rc = -ENOMEM;
1297 		goto done;
1298 	}
1299 	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1300 	swq = &sq->swq[sw_prod];
1301 	swq->wr_id = wqe->wr_id;
1302 	swq->type = wqe->type;
1303 	swq->flags = wqe->flags;
1304 	if (qp->sig_type)
1305 		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1306 	swq->start_psn = sq->psn & BTH_PSN_MASK;
1307 
1308 	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
1309 	hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
1310 					[get_sqe_idx(sw_prod)];
1311 
1312 	memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
1313 
1314 	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1315 		/* Copy the inline data */
1316 		if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
1317 			dev_warn(&sq->hwq.pdev->dev,
1318 				 "QPLIB: Inline data length > 96 detected");
1319 			data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
1320 		} else {
1321 			data_len = wqe->inline_len;
1322 		}
1323 		memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
1324 		wqe_size16 = (data_len + 15) >> 4;
1325 	} else {
1326 		for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
1327 		     i < wqe->num_sge; i++, hw_sge++) {
1328 			hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1329 			hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1330 			hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1331 			data_len += wqe->sg_list[i].size;
1332 		}
1333 		/* Each SGE entry = 1 WQE size16 */
1334 		wqe_size16 = wqe->num_sge;
1335 		/* HW requires wqe size has room for atleast one SGE even if
1336 		 * none was supplied by ULP
1337 		 */
1338 		if (!wqe->num_sge)
1339 			wqe_size16++;
1340 	}
1341 
1342 	/* Specifics */
1343 	switch (wqe->type) {
1344 	case BNXT_QPLIB_SWQE_TYPE_SEND:
1345 		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1346 			/* Assemble info for Raw Ethertype QPs */
1347 			struct sq_send_raweth_qp1 *sqe =
1348 				(struct sq_send_raweth_qp1 *)hw_sq_send_hdr;
1349 
1350 			sqe->wqe_type = wqe->type;
1351 			sqe->flags = wqe->flags;
1352 			sqe->wqe_size = wqe_size16 +
1353 				((offsetof(typeof(*sqe), data) + 15) >> 4);
1354 			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1355 			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1356 			sqe->length = cpu_to_le32(data_len);
1357 			sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1358 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1359 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1360 
1361 			break;
1362 		}
1363 		/* else, just fall thru */
1364 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1365 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1366 	{
1367 		struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;
1368 
1369 		sqe->wqe_type = wqe->type;
1370 		sqe->flags = wqe->flags;
1371 		sqe->wqe_size = wqe_size16 +
1372 				((offsetof(typeof(*sqe), data) + 15) >> 4);
1373 		sqe->inv_key_or_imm_data = cpu_to_le32(
1374 						wqe->send.inv_key);
1375 		if (qp->type == CMDQ_CREATE_QP_TYPE_UD) {
1376 			sqe->q_key = cpu_to_le32(wqe->send.q_key);
1377 			sqe->dst_qp = cpu_to_le32(
1378 					wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
1379 			sqe->length = cpu_to_le32(data_len);
1380 			sqe->avid = cpu_to_le32(wqe->send.avid &
1381 						SQ_SEND_AVID_MASK);
1382 			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1383 		} else {
1384 			sqe->length = cpu_to_le32(data_len);
1385 			sqe->dst_qp = 0;
1386 			sqe->avid = 0;
1387 			if (qp->mtu)
1388 				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1389 			if (!pkt_num)
1390 				pkt_num = 1;
1391 			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1392 		}
1393 		break;
1394 	}
1395 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1396 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1397 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1398 	{
1399 		struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;
1400 
1401 		sqe->wqe_type = wqe->type;
1402 		sqe->flags = wqe->flags;
1403 		sqe->wqe_size = wqe_size16 +
1404 				((offsetof(typeof(*sqe), data) + 15) >> 4);
1405 		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1406 		sqe->length = cpu_to_le32((u32)data_len);
1407 		sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1408 		sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1409 		if (qp->mtu)
1410 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1411 		if (!pkt_num)
1412 			pkt_num = 1;
1413 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1414 		break;
1415 	}
1416 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1417 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1418 	{
1419 		struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;
1420 
1421 		sqe->wqe_type = wqe->type;
1422 		sqe->flags = wqe->flags;
1423 		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1424 		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1425 		sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1426 		sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1427 		if (qp->mtu)
1428 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1429 		if (!pkt_num)
1430 			pkt_num = 1;
1431 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1432 		break;
1433 	}
1434 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1435 	{
1436 		struct sq_localinvalidate *sqe =
1437 				(struct sq_localinvalidate *)hw_sq_send_hdr;
1438 
1439 		sqe->wqe_type = wqe->type;
1440 		sqe->flags = wqe->flags;
1441 		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1442 
1443 		break;
1444 	}
1445 	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1446 	{
1447 		struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;
1448 
1449 		sqe->wqe_type = wqe->type;
1450 		sqe->flags = wqe->flags;
1451 		sqe->access_cntl = wqe->frmr.access_cntl |
1452 				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1453 		sqe->zero_based_page_size_log =
1454 			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1455 			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1456 			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1457 		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1458 		temp32 = cpu_to_le32(wqe->frmr.length);
1459 		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1460 		sqe->numlevels_pbl_page_size_log =
1461 			((wqe->frmr.pbl_pg_sz_log <<
1462 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1463 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1464 			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1465 					SQ_FR_PMR_NUMLEVELS_MASK);
1466 
1467 		for (i = 0; i < wqe->frmr.page_list_len; i++)
1468 			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1469 						wqe->frmr.page_list[i] |
1470 						PTU_PTE_VALID);
1471 		sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1472 		sqe->va = cpu_to_le64(wqe->frmr.va);
1473 
1474 		break;
1475 	}
1476 	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1477 	{
1478 		struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;
1479 
1480 		sqe->wqe_type = wqe->type;
1481 		sqe->flags = wqe->flags;
1482 		sqe->access_cntl = wqe->bind.access_cntl;
1483 		sqe->mw_type_zero_based = wqe->bind.mw_type |
1484 			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1485 		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1486 		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1487 		sqe->va = cpu_to_le64(wqe->bind.va);
1488 		temp32 = cpu_to_le32(wqe->bind.length);
1489 		memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
1490 		break;
1491 	}
1492 	default:
1493 		/* Bad wqe, return error */
1494 		rc = -EINVAL;
1495 		goto done;
1496 	}
1497 	swq->next_psn = sq->psn & BTH_PSN_MASK;
1498 	if (swq->psn_search) {
1499 		swq->psn_search->opcode_start_psn = cpu_to_le32(
1500 			((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1501 			 SQ_PSN_SEARCH_START_PSN_MASK) |
1502 			((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1503 			 SQ_PSN_SEARCH_OPCODE_MASK));
1504 		swq->psn_search->flags_next_psn = cpu_to_le32(
1505 			((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1506 			 SQ_PSN_SEARCH_NEXT_PSN_MASK));
1507 	}
1508 queue_err:
1509 	if (sch_handler) {
1510 		/* Store the ULP info in the software structures */
1511 		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1512 		swq = &sq->swq[sw_prod];
1513 		swq->wr_id = wqe->wr_id;
1514 		swq->type = wqe->type;
1515 		swq->flags = wqe->flags;
1516 		if (qp->sig_type)
1517 			swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1518 		swq->start_psn = sq->psn & BTH_PSN_MASK;
1519 	}
1520 	sq->hwq.prod++;
1521 	qp->wqe_cnt++;
1522 
1523 done:
1524 	if (sch_handler) {
1525 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1526 		if (nq_work) {
1527 			nq_work->cq = qp->scq;
1528 			nq_work->nq = qp->scq->nq;
1529 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1530 			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
1531 		} else {
1532 			dev_err(&sq->hwq.pdev->dev,
1533 				"QPLIB: FP: Failed to allocate SQ nq_work!");
1534 			rc = -ENOMEM;
1535 		}
1536 	}
1537 	return rc;
1538 }
1539 
1540 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
1541 {
1542 	struct bnxt_qplib_q *rq = &qp->rq;
1543 	struct dbr_dbr db_msg = { 0 };
1544 	u32 sw_prod;
1545 
1546 	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1547 	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
1548 				   DBR_DBR_INDEX_MASK);
1549 	db_msg.type_xid =
1550 		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1551 			    DBR_DBR_TYPE_RQ);
1552 
1553 	/* Flush the writes to HW Rx WQE before the ringing Rx DB */
1554 	wmb();
1555 	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1556 }
1557 
1558 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1559 			 struct bnxt_qplib_swqe *wqe)
1560 {
1561 	struct bnxt_qplib_q *rq = &qp->rq;
1562 	struct rq_wqe *rqe, **rqe_ptr;
1563 	struct sq_sge *hw_sge;
1564 	struct bnxt_qplib_nq_work *nq_work = NULL;
1565 	bool sch_handler = false;
1566 	u32 sw_prod;
1567 	int i, rc = 0;
1568 
1569 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1570 		sch_handler = true;
1571 		dev_dbg(&rq->hwq.pdev->dev,
1572 			"%s Error QP. Scheduling for poll_cq\n",
1573 			__func__);
1574 		goto queue_err;
1575 	}
1576 	if (bnxt_qplib_queue_full(rq)) {
1577 		dev_err(&rq->hwq.pdev->dev,
1578 			"QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
1579 		rc = -EINVAL;
1580 		goto done;
1581 	}
1582 	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1583 	rq->swq[sw_prod].wr_id = wqe->wr_id;
1584 
1585 	rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
1586 	rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
1587 
1588 	memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1589 
1590 	/* Calculate wqe_size16 and data_len */
1591 	for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
1592 	     i < wqe->num_sge; i++, hw_sge++) {
1593 		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1594 		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1595 		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1596 	}
1597 	rqe->wqe_type = wqe->type;
1598 	rqe->flags = wqe->flags;
1599 	rqe->wqe_size = wqe->num_sge +
1600 			((offsetof(typeof(*rqe), data) + 15) >> 4);
1601 	/* HW requires wqe size has room for atleast one SGE even if none
1602 	 * was supplied by ULP
1603 	 */
1604 	if (!wqe->num_sge)
1605 		rqe->wqe_size++;
1606 
1607 	/* Supply the rqe->wr_id index to the wr_id_tbl for now */
1608 	rqe->wr_id[0] = cpu_to_le32(sw_prod);
1609 
1610 queue_err:
1611 	if (sch_handler) {
1612 		/* Store the ULP info in the software structures */
1613 		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1614 		rq->swq[sw_prod].wr_id = wqe->wr_id;
1615 	}
1616 
1617 	rq->hwq.prod++;
1618 	if (sch_handler) {
1619 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1620 		if (nq_work) {
1621 			nq_work->cq = qp->rcq;
1622 			nq_work->nq = qp->rcq->nq;
1623 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1624 			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
1625 		} else {
1626 			dev_err(&rq->hwq.pdev->dev,
1627 				"QPLIB: FP: Failed to allocate RQ nq_work!");
1628 			rc = -ENOMEM;
1629 		}
1630 	}
1631 done:
1632 	return rc;
1633 }
1634 
1635 /* CQ */
1636 
1637 /* Spinlock must be held */
1638 static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
1639 {
1640 	struct dbr_dbr db_msg = { 0 };
1641 
1642 	db_msg.type_xid =
1643 		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1644 			    DBR_DBR_TYPE_CQ_ARMENA);
1645 	/* Flush memory writes before enabling the CQ */
1646 	wmb();
1647 	__iowrite64_copy(cq->dbr_base, &db_msg, sizeof(db_msg) / sizeof(u64));
1648 }
1649 
1650 static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
1651 {
1652 	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1653 	struct dbr_dbr db_msg = { 0 };
1654 	u32 sw_cons;
1655 
1656 	/* Ring DB */
1657 	sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
1658 	db_msg.index = cpu_to_le32((sw_cons << DBR_DBR_INDEX_SFT) &
1659 				    DBR_DBR_INDEX_MASK);
1660 	db_msg.type_xid =
1661 		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1662 			    arm_type);
1663 	/* flush memory writes before arming the CQ */
1664 	wmb();
1665 	__iowrite64_copy(cq->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1666 }
1667 
1668 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1669 {
1670 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1671 	struct cmdq_create_cq req;
1672 	struct creq_create_cq_resp resp;
1673 	struct bnxt_qplib_pbl *pbl;
1674 	u16 cmd_flags = 0;
1675 	int rc;
1676 
1677 	cq->hwq.max_elements = cq->max_wqe;
1678 	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
1679 				       cq->nmap, &cq->hwq.max_elements,
1680 				       BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
1681 				       PAGE_SIZE, HWQ_TYPE_QUEUE);
1682 	if (rc)
1683 		goto exit;
1684 
1685 	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
1686 
1687 	if (!cq->dpi) {
1688 		dev_err(&rcfw->pdev->dev,
1689 			"QPLIB: FP: CREATE_CQ failed due to NULL DPI");
1690 		return -EINVAL;
1691 	}
1692 	req.dpi = cpu_to_le32(cq->dpi->dpi);
1693 	req.cq_handle = cpu_to_le64(cq->cq_handle);
1694 
1695 	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
1696 	pbl = &cq->hwq.pbl[PBL_LVL_0];
1697 	req.pg_size_lvl = cpu_to_le32(
1698 	    ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
1699 						CMDQ_CREATE_CQ_LVL_SFT) |
1700 	    (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
1701 	     pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
1702 	     pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
1703 	     pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
1704 	     pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
1705 	     pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
1706 	     CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
1707 
1708 	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1709 
1710 	req.cq_fco_cnq_id = cpu_to_le32(
1711 			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
1712 			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
1713 
1714 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1715 					  (void *)&resp, NULL, 0);
1716 	if (rc)
1717 		goto fail;
1718 
1719 	cq->id = le32_to_cpu(resp.xid);
1720 	cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
1721 	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
1722 	init_waitqueue_head(&cq->waitq);
1723 	INIT_LIST_HEAD(&cq->sqf_head);
1724 	INIT_LIST_HEAD(&cq->rqf_head);
1725 	spin_lock_init(&cq->compl_lock);
1726 
1727 	bnxt_qplib_arm_cq_enable(cq);
1728 	return 0;
1729 
1730 fail:
1731 	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1732 exit:
1733 	return rc;
1734 }
1735 
1736 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1737 {
1738 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1739 	struct cmdq_destroy_cq req;
1740 	struct creq_destroy_cq_resp resp;
1741 	u16 cmd_flags = 0;
1742 	int rc;
1743 
1744 	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
1745 
1746 	req.cq_cid = cpu_to_le32(cq->id);
1747 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1748 					  (void *)&resp, NULL, 0);
1749 	if (rc)
1750 		return rc;
1751 	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1752 	return 0;
1753 }
1754 
1755 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
1756 		      struct bnxt_qplib_cqe **pcqe, int *budget)
1757 {
1758 	u32 sw_prod, sw_cons;
1759 	struct bnxt_qplib_cqe *cqe;
1760 	int rc = 0;
1761 
1762 	/* Now complete all outstanding SQEs with FLUSHED_ERR */
1763 	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1764 	cqe = *pcqe;
1765 	while (*budget) {
1766 		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
1767 		if (sw_cons == sw_prod) {
1768 			break;
1769 		}
1770 		/* Skip the FENCE WQE completions */
1771 		if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
1772 			bnxt_qplib_cancel_phantom_processing(qp);
1773 			goto skip_compl;
1774 		}
1775 		memset(cqe, 0, sizeof(*cqe));
1776 		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
1777 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
1778 		cqe->qp_handle = (u64)(unsigned long)qp;
1779 		cqe->wr_id = sq->swq[sw_cons].wr_id;
1780 		cqe->src_qp = qp->id;
1781 		cqe->type = sq->swq[sw_cons].type;
1782 		cqe++;
1783 		(*budget)--;
1784 skip_compl:
1785 		sq->hwq.cons++;
1786 	}
1787 	*pcqe = cqe;
1788 	if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
1789 		/* Out of budget */
1790 		rc = -EAGAIN;
1791 
1792 	return rc;
1793 }
1794 
1795 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
1796 		      struct bnxt_qplib_cqe **pcqe, int *budget)
1797 {
1798 	struct bnxt_qplib_cqe *cqe;
1799 	u32 sw_prod, sw_cons;
1800 	int rc = 0;
1801 	int opcode = 0;
1802 
1803 	switch (qp->type) {
1804 	case CMDQ_CREATE_QP1_TYPE_GSI:
1805 		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
1806 		break;
1807 	case CMDQ_CREATE_QP_TYPE_RC:
1808 		opcode = CQ_BASE_CQE_TYPE_RES_RC;
1809 		break;
1810 	case CMDQ_CREATE_QP_TYPE_UD:
1811 		opcode = CQ_BASE_CQE_TYPE_RES_UD;
1812 		break;
1813 	}
1814 
1815 	/* Flush the rest of the RQ */
1816 	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1817 	cqe = *pcqe;
1818 	while (*budget) {
1819 		sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
1820 		if (sw_cons == sw_prod)
1821 			break;
1822 		memset(cqe, 0, sizeof(*cqe));
1823 		cqe->status =
1824 		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
1825 		cqe->opcode = opcode;
1826 		cqe->qp_handle = (unsigned long)qp;
1827 		cqe->wr_id = rq->swq[sw_cons].wr_id;
1828 		cqe++;
1829 		(*budget)--;
1830 		rq->hwq.cons++;
1831 	}
1832 	*pcqe = cqe;
1833 	if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
1834 		/* Out of budget */
1835 		rc = -EAGAIN;
1836 
1837 	return rc;
1838 }
1839 
1840 void bnxt_qplib_mark_qp_error(void *qp_handle)
1841 {
1842 	struct bnxt_qplib_qp *qp = qp_handle;
1843 
1844 	if (!qp)
1845 		return;
1846 
1847 	/* Must block new posting of SQ and RQ */
1848 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
1849 	bnxt_qplib_cancel_phantom_processing(qp);
1850 
1851 	/* Add qp to flush list of the CQ */
1852 	__bnxt_qplib_add_flush_qp(qp);
1853 }
1854 
1855 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
1856  *       CQE is track from sw_cq_cons to max_element but valid only if VALID=1
1857  */
1858 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
1859 		     u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
1860 {
1861 	struct bnxt_qplib_q *sq = &qp->sq;
1862 	struct bnxt_qplib_swq *swq;
1863 	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
1864 	struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
1865 	struct cq_req *peek_req_hwcqe;
1866 	struct bnxt_qplib_qp *peek_qp;
1867 	struct bnxt_qplib_q *peek_sq;
1868 	int i, rc = 0;
1869 
1870 	/* Normal mode */
1871 	/* Check for the psn_search marking before completing */
1872 	swq = &sq->swq[sw_sq_cons];
1873 	if (swq->psn_search &&
1874 	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
1875 		/* Unmark */
1876 		swq->psn_search->flags_next_psn = cpu_to_le32
1877 			(le32_to_cpu(swq->psn_search->flags_next_psn)
1878 				     & ~0x80000000);
1879 		dev_dbg(&cq->hwq.pdev->dev,
1880 			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
1881 			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
1882 		sq->condition = true;
1883 		sq->send_phantom = true;
1884 
1885 		/* TODO: Only ARM if the previous SQE is ARMALL */
1886 		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
1887 
1888 		rc = -EAGAIN;
1889 		goto out;
1890 	}
1891 	if (sq->condition) {
1892 		/* Peek at the completions */
1893 		peek_raw_cq_cons = cq->hwq.cons;
1894 		peek_sw_cq_cons = cq_cons;
1895 		i = cq->hwq.max_elements;
1896 		while (i--) {
1897 			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
1898 			peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
1899 			peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
1900 						     [CQE_IDX(peek_sw_cq_cons)];
1901 			/* If the next hwcqe is VALID */
1902 			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
1903 					  cq->hwq.max_elements)) {
1904 				/* If the next hwcqe is a REQ */
1905 				if ((peek_hwcqe->cqe_type_toggle &
1906 				    CQ_BASE_CQE_TYPE_MASK) ==
1907 				    CQ_BASE_CQE_TYPE_REQ) {
1908 					peek_req_hwcqe = (struct cq_req *)
1909 							 peek_hwcqe;
1910 					peek_qp = (struct bnxt_qplib_qp *)
1911 						((unsigned long)
1912 						 le64_to_cpu
1913 						 (peek_req_hwcqe->qp_handle));
1914 					peek_sq = &peek_qp->sq;
1915 					peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
1916 						peek_req_hwcqe->sq_cons_idx) - 1
1917 						, &sq->hwq);
1918 					/* If the hwcqe's sq's wr_id matches */
1919 					if (peek_sq == sq &&
1920 					    sq->swq[peek_sq_cons_idx].wr_id ==
1921 					    BNXT_QPLIB_FENCE_WRID) {
1922 						/*
1923 						 *  Unbreak only if the phantom
1924 						 *  comes back
1925 						 */
1926 						dev_dbg(&cq->hwq.pdev->dev,
1927 							"FP:Got Phantom CQE");
1928 						sq->condition = false;
1929 						sq->single = true;
1930 						rc = 0;
1931 						goto out;
1932 					}
1933 				}
1934 				/* Valid but not the phantom, so keep looping */
1935 			} else {
1936 				/* Not valid yet, just exit and wait */
1937 				rc = -EINVAL;
1938 				goto out;
1939 			}
1940 			peek_sw_cq_cons++;
1941 			peek_raw_cq_cons++;
1942 		}
1943 		dev_err(&cq->hwq.pdev->dev,
1944 			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
1945 			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
1946 		rc = -EINVAL;
1947 	}
1948 out:
1949 	return rc;
1950 }
1951 
1952 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
1953 				     struct cq_req *hwcqe,
1954 				     struct bnxt_qplib_cqe **pcqe, int *budget,
1955 				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
1956 {
1957 	struct bnxt_qplib_qp *qp;
1958 	struct bnxt_qplib_q *sq;
1959 	struct bnxt_qplib_cqe *cqe;
1960 	u32 sw_sq_cons, cqe_sq_cons;
1961 	struct bnxt_qplib_swq *swq;
1962 	int rc = 0;
1963 
1964 	qp = (struct bnxt_qplib_qp *)((unsigned long)
1965 				      le64_to_cpu(hwcqe->qp_handle));
1966 	if (!qp) {
1967 		dev_err(&cq->hwq.pdev->dev,
1968 			"QPLIB: FP: Process Req qp is NULL");
1969 		return -EINVAL;
1970 	}
1971 	sq = &qp->sq;
1972 
1973 	cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
1974 	if (cqe_sq_cons > sq->hwq.max_elements) {
1975 		dev_err(&cq->hwq.pdev->dev,
1976 			"QPLIB: FP: CQ Process req reported ");
1977 		dev_err(&cq->hwq.pdev->dev,
1978 			"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
1979 			cqe_sq_cons, sq->hwq.max_elements);
1980 		return -EINVAL;
1981 	}
1982 
1983 	if (qp->sq.flushed) {
1984 		dev_dbg(&cq->hwq.pdev->dev,
1985 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
1986 		goto done;
1987 	}
1988 	/* Require to walk the sq's swq to fabricate CQEs for all previously
1989 	 * signaled SWQEs due to CQE aggregation from the current sq cons
1990 	 * to the cqe_sq_cons
1991 	 */
1992 	cqe = *pcqe;
1993 	while (*budget) {
1994 		sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
1995 		if (sw_sq_cons == cqe_sq_cons)
1996 			/* Done */
1997 			break;
1998 
1999 		swq = &sq->swq[sw_sq_cons];
2000 		memset(cqe, 0, sizeof(*cqe));
2001 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2002 		cqe->qp_handle = (u64)(unsigned long)qp;
2003 		cqe->src_qp = qp->id;
2004 		cqe->wr_id = swq->wr_id;
2005 		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2006 			goto skip;
2007 		cqe->type = swq->type;
2008 
2009 		/* For the last CQE, check for status.  For errors, regardless
2010 		 * of the request being signaled or not, it must complete with
2011 		 * the hwcqe error status
2012 		 */
2013 		if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
2014 		    hwcqe->status != CQ_REQ_STATUS_OK) {
2015 			cqe->status = hwcqe->status;
2016 			dev_err(&cq->hwq.pdev->dev,
2017 				"QPLIB: FP: CQ Processed Req ");
2018 			dev_err(&cq->hwq.pdev->dev,
2019 				"QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
2020 				sw_sq_cons, cqe->wr_id, cqe->status);
2021 			cqe++;
2022 			(*budget)--;
2023 			bnxt_qplib_lock_buddy_cq(qp, cq);
2024 			bnxt_qplib_mark_qp_error(qp);
2025 			bnxt_qplib_unlock_buddy_cq(qp, cq);
2026 		} else {
2027 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2028 				/* Before we complete, do WA 9060 */
2029 				if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
2030 					      cqe_sq_cons)) {
2031 					*lib_qp = qp;
2032 					goto out;
2033 				}
2034 				cqe->status = CQ_REQ_STATUS_OK;
2035 				cqe++;
2036 				(*budget)--;
2037 			}
2038 		}
2039 skip:
2040 		sq->hwq.cons++;
2041 		if (sq->single)
2042 			break;
2043 	}
2044 out:
2045 	*pcqe = cqe;
2046 	if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
2047 		/* Out of budget */
2048 		rc = -EAGAIN;
2049 		goto done;
2050 	}
2051 	/*
2052 	 * Back to normal completion mode only after it has completed all of
2053 	 * the WC for this CQE
2054 	 */
2055 	sq->single = false;
2056 done:
2057 	return rc;
2058 }
2059 
2060 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2061 					struct cq_res_rc *hwcqe,
2062 					struct bnxt_qplib_cqe **pcqe,
2063 					int *budget)
2064 {
2065 	struct bnxt_qplib_qp *qp;
2066 	struct bnxt_qplib_q *rq;
2067 	struct bnxt_qplib_cqe *cqe;
2068 	u32 wr_id_idx;
2069 	int rc = 0;
2070 
2071 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2072 				      le64_to_cpu(hwcqe->qp_handle));
2073 	if (!qp) {
2074 		dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
2075 		return -EINVAL;
2076 	}
2077 	if (qp->rq.flushed) {
2078 		dev_dbg(&cq->hwq.pdev->dev,
2079 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2080 		goto done;
2081 	}
2082 
2083 	cqe = *pcqe;
2084 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2085 	cqe->length = le32_to_cpu(hwcqe->length);
2086 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2087 	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2088 	cqe->flags = le16_to_cpu(hwcqe->flags);
2089 	cqe->status = hwcqe->status;
2090 	cqe->qp_handle = (u64)(unsigned long)qp;
2091 
2092 	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2093 				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2094 	rq = &qp->rq;
2095 	if (wr_id_idx > rq->hwq.max_elements) {
2096 		dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process RC ");
2097 		dev_err(&cq->hwq.pdev->dev,
2098 			"QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
2099 			wr_id_idx, rq->hwq.max_elements);
2100 		return -EINVAL;
2101 	}
2102 
2103 	cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2104 	cqe++;
2105 	(*budget)--;
2106 	rq->hwq.cons++;
2107 	*pcqe = cqe;
2108 
2109 	if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2110 		 /* Add qp to flush list of the CQ */
2111 		bnxt_qplib_lock_buddy_cq(qp, cq);
2112 		__bnxt_qplib_add_flush_qp(qp);
2113 		bnxt_qplib_unlock_buddy_cq(qp, cq);
2114 	}
2115 
2116 done:
2117 	return rc;
2118 }
2119 
2120 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2121 					struct cq_res_ud *hwcqe,
2122 					struct bnxt_qplib_cqe **pcqe,
2123 					int *budget)
2124 {
2125 	struct bnxt_qplib_qp *qp;
2126 	struct bnxt_qplib_q *rq;
2127 	struct bnxt_qplib_cqe *cqe;
2128 	u32 wr_id_idx;
2129 	int rc = 0;
2130 
2131 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2132 				      le64_to_cpu(hwcqe->qp_handle));
2133 	if (!qp) {
2134 		dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
2135 		return -EINVAL;
2136 	}
2137 	if (qp->rq.flushed) {
2138 		dev_dbg(&cq->hwq.pdev->dev,
2139 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2140 		goto done;
2141 	}
2142 	cqe = *pcqe;
2143 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2144 	cqe->length = le32_to_cpu(hwcqe->length);
2145 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2146 	cqe->flags = le16_to_cpu(hwcqe->flags);
2147 	cqe->status = hwcqe->status;
2148 	cqe->qp_handle = (u64)(unsigned long)qp;
2149 	memcpy(cqe->smac, hwcqe->src_mac, 6);
2150 	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2151 				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2152 	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2153 				  ((le32_to_cpu(
2154 				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
2155 				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2156 
2157 	rq = &qp->rq;
2158 	if (wr_id_idx > rq->hwq.max_elements) {
2159 		dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process UD ");
2160 		dev_err(&cq->hwq.pdev->dev,
2161 			"QPLIB: wr_id idx %#x exceeded RQ max %#x",
2162 			wr_id_idx, rq->hwq.max_elements);
2163 		return -EINVAL;
2164 	}
2165 
2166 	cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2167 	cqe++;
2168 	(*budget)--;
2169 	rq->hwq.cons++;
2170 	*pcqe = cqe;
2171 
2172 	if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2173 		/* Add qp to flush list of the CQ */
2174 		bnxt_qplib_lock_buddy_cq(qp, cq);
2175 		__bnxt_qplib_add_flush_qp(qp);
2176 		bnxt_qplib_unlock_buddy_cq(qp, cq);
2177 	}
2178 done:
2179 	return rc;
2180 }
2181 
2182 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2183 {
2184 	struct cq_base *hw_cqe, **hw_cqe_ptr;
2185 	unsigned long flags;
2186 	u32 sw_cons, raw_cons;
2187 	bool rc = true;
2188 
2189 	spin_lock_irqsave(&cq->hwq.lock, flags);
2190 	raw_cons = cq->hwq.cons;
2191 	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2192 	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2193 	hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2194 
2195 	 /* Check for Valid bit. If the CQE is valid, return false */
2196 	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2197 	spin_unlock_irqrestore(&cq->hwq.lock, flags);
2198 	return rc;
2199 }
2200 
2201 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2202 						struct cq_res_raweth_qp1 *hwcqe,
2203 						struct bnxt_qplib_cqe **pcqe,
2204 						int *budget)
2205 {
2206 	struct bnxt_qplib_qp *qp;
2207 	struct bnxt_qplib_q *rq;
2208 	struct bnxt_qplib_cqe *cqe;
2209 	u32 wr_id_idx;
2210 	int rc = 0;
2211 
2212 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2213 				      le64_to_cpu(hwcqe->qp_handle));
2214 	if (!qp) {
2215 		dev_err(&cq->hwq.pdev->dev,
2216 			"QPLIB: process_cq Raw/QP1 qp is NULL");
2217 		return -EINVAL;
2218 	}
2219 	if (qp->rq.flushed) {
2220 		dev_dbg(&cq->hwq.pdev->dev,
2221 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2222 		goto done;
2223 	}
2224 	cqe = *pcqe;
2225 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2226 	cqe->flags = le16_to_cpu(hwcqe->flags);
2227 	cqe->qp_handle = (u64)(unsigned long)qp;
2228 
2229 	wr_id_idx =
2230 		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2231 				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2232 	cqe->src_qp = qp->id;
2233 	if (qp->id == 1 && !cqe->length) {
2234 		/* Add workaround for the length misdetection */
2235 		cqe->length = 296;
2236 	} else {
2237 		cqe->length = le16_to_cpu(hwcqe->length);
2238 	}
2239 	cqe->pkey_index = qp->pkey_index;
2240 	memcpy(cqe->smac, qp->smac, 6);
2241 
2242 	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2243 	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2244 
2245 	rq = &qp->rq;
2246 	if (wr_id_idx > rq->hwq.max_elements) {
2247 		dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
2248 		dev_err(&cq->hwq.pdev->dev, "QPLIB: ix 0x%x exceeded RQ max 0x%x",
2249 			wr_id_idx, rq->hwq.max_elements);
2250 		return -EINVAL;
2251 	}
2252 
2253 	cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2254 	cqe++;
2255 	(*budget)--;
2256 	rq->hwq.cons++;
2257 	*pcqe = cqe;
2258 
2259 	if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2260 		/* Add qp to flush list of the CQ */
2261 		bnxt_qplib_lock_buddy_cq(qp, cq);
2262 		__bnxt_qplib_add_flush_qp(qp);
2263 		bnxt_qplib_unlock_buddy_cq(qp, cq);
2264 	}
2265 
2266 done:
2267 	return rc;
2268 }
2269 
2270 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2271 					  struct cq_terminal *hwcqe,
2272 					  struct bnxt_qplib_cqe **pcqe,
2273 					  int *budget)
2274 {
2275 	struct bnxt_qplib_qp *qp;
2276 	struct bnxt_qplib_q *sq, *rq;
2277 	struct bnxt_qplib_cqe *cqe;
2278 	u32 sw_cons = 0, cqe_cons;
2279 	int rc = 0;
2280 
2281 	/* Check the Status */
2282 	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2283 		dev_warn(&cq->hwq.pdev->dev,
2284 			 "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
2285 			 hwcqe->status);
2286 
2287 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2288 				      le64_to_cpu(hwcqe->qp_handle));
2289 	if (!qp) {
2290 		dev_err(&cq->hwq.pdev->dev,
2291 			"QPLIB: FP: CQ Process terminal qp is NULL");
2292 		return -EINVAL;
2293 	}
2294 
2295 	/* Must block new posting of SQ and RQ */
2296 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2297 
2298 	sq = &qp->sq;
2299 	rq = &qp->rq;
2300 
2301 	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2302 	if (cqe_cons == 0xFFFF)
2303 		goto do_rq;
2304 
2305 	if (cqe_cons > sq->hwq.max_elements) {
2306 		dev_err(&cq->hwq.pdev->dev,
2307 			"QPLIB: FP: CQ Process terminal reported ");
2308 		dev_err(&cq->hwq.pdev->dev,
2309 			"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
2310 			cqe_cons, sq->hwq.max_elements);
2311 		goto do_rq;
2312 	}
2313 
2314 	if (qp->sq.flushed) {
2315 		dev_dbg(&cq->hwq.pdev->dev,
2316 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2317 		goto sq_done;
2318 	}
2319 
2320 	/* Terminal CQE can also include aggregated successful CQEs prior.
2321 	 * So we must complete all CQEs from the current sq's cons to the
2322 	 * cq_cons with status OK
2323 	 */
2324 	cqe = *pcqe;
2325 	while (*budget) {
2326 		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2327 		if (sw_cons == cqe_cons)
2328 			break;
2329 		if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2330 			memset(cqe, 0, sizeof(*cqe));
2331 			cqe->status = CQ_REQ_STATUS_OK;
2332 			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2333 			cqe->qp_handle = (u64)(unsigned long)qp;
2334 			cqe->src_qp = qp->id;
2335 			cqe->wr_id = sq->swq[sw_cons].wr_id;
2336 			cqe->type = sq->swq[sw_cons].type;
2337 			cqe++;
2338 			(*budget)--;
2339 		}
2340 		sq->hwq.cons++;
2341 	}
2342 	*pcqe = cqe;
2343 	if (!(*budget) && sw_cons != cqe_cons) {
2344 		/* Out of budget */
2345 		rc = -EAGAIN;
2346 		goto sq_done;
2347 	}
2348 sq_done:
2349 	if (rc)
2350 		return rc;
2351 do_rq:
2352 	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2353 	if (cqe_cons == 0xFFFF) {
2354 		goto done;
2355 	} else if (cqe_cons > rq->hwq.max_elements) {
2356 		dev_err(&cq->hwq.pdev->dev,
2357 			"QPLIB: FP: CQ Processed terminal ");
2358 		dev_err(&cq->hwq.pdev->dev,
2359 			"QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x",
2360 			cqe_cons, rq->hwq.max_elements);
2361 		goto done;
2362 	}
2363 
2364 	if (qp->rq.flushed) {
2365 		dev_dbg(&cq->hwq.pdev->dev,
2366 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2367 		rc = 0;
2368 		goto done;
2369 	}
2370 
2371 	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2372 	 * from the current rq->cons to the rq->prod regardless what the
2373 	 * rq->cons the terminal CQE indicates
2374 	 */
2375 
2376 	/* Add qp to flush list of the CQ */
2377 	bnxt_qplib_lock_buddy_cq(qp, cq);
2378 	__bnxt_qplib_add_flush_qp(qp);
2379 	bnxt_qplib_unlock_buddy_cq(qp, cq);
2380 done:
2381 	return rc;
2382 }
2383 
2384 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2385 					struct cq_cutoff *hwcqe)
2386 {
2387 	/* Check the Status */
2388 	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2389 		dev_err(&cq->hwq.pdev->dev,
2390 			"QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
2391 			hwcqe->status);
2392 		return -EINVAL;
2393 	}
2394 	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2395 	wake_up_interruptible(&cq->waitq);
2396 
2397 	return 0;
2398 }
2399 
2400 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2401 				  struct bnxt_qplib_cqe *cqe,
2402 				  int num_cqes)
2403 {
2404 	struct bnxt_qplib_qp *qp = NULL;
2405 	u32 budget = num_cqes;
2406 	unsigned long flags;
2407 
2408 	spin_lock_irqsave(&cq->hwq.lock, flags);
2409 	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2410 		dev_dbg(&cq->hwq.pdev->dev,
2411 			"QPLIB: FP: Flushing SQ QP= %p",
2412 			qp);
2413 		__flush_sq(&qp->sq, qp, &cqe, &budget);
2414 	}
2415 
2416 	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2417 		dev_dbg(&cq->hwq.pdev->dev,
2418 			"QPLIB: FP: Flushing RQ QP= %p",
2419 			qp);
2420 		__flush_rq(&qp->rq, qp, &cqe, &budget);
2421 	}
2422 	spin_unlock_irqrestore(&cq->hwq.lock, flags);
2423 
2424 	return num_cqes - budget;
2425 }
2426 
2427 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2428 		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
2429 {
2430 	struct cq_base *hw_cqe, **hw_cqe_ptr;
2431 	unsigned long flags;
2432 	u32 sw_cons, raw_cons;
2433 	int budget, rc = 0;
2434 
2435 	spin_lock_irqsave(&cq->hwq.lock, flags);
2436 	raw_cons = cq->hwq.cons;
2437 	budget = num_cqes;
2438 
2439 	while (budget) {
2440 		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2441 		hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2442 		hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2443 
2444 		/* Check for Valid bit */
2445 		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2446 			break;
2447 
2448 		/* From the device's respective CQE format to qplib_wc*/
2449 		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
2450 		case CQ_BASE_CQE_TYPE_REQ:
2451 			rc = bnxt_qplib_cq_process_req(cq,
2452 						       (struct cq_req *)hw_cqe,
2453 						       &cqe, &budget,
2454 						       sw_cons, lib_qp);
2455 			break;
2456 		case CQ_BASE_CQE_TYPE_RES_RC:
2457 			rc = bnxt_qplib_cq_process_res_rc(cq,
2458 							  (struct cq_res_rc *)
2459 							  hw_cqe, &cqe,
2460 							  &budget);
2461 			break;
2462 		case CQ_BASE_CQE_TYPE_RES_UD:
2463 			rc = bnxt_qplib_cq_process_res_ud
2464 					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
2465 					 &budget);
2466 			break;
2467 		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2468 			rc = bnxt_qplib_cq_process_res_raweth_qp1
2469 					(cq, (struct cq_res_raweth_qp1 *)
2470 					 hw_cqe, &cqe, &budget);
2471 			break;
2472 		case CQ_BASE_CQE_TYPE_TERMINAL:
2473 			rc = bnxt_qplib_cq_process_terminal
2474 					(cq, (struct cq_terminal *)hw_cqe,
2475 					 &cqe, &budget);
2476 			break;
2477 		case CQ_BASE_CQE_TYPE_CUT_OFF:
2478 			bnxt_qplib_cq_process_cutoff
2479 					(cq, (struct cq_cutoff *)hw_cqe);
2480 			/* Done processing this CQ */
2481 			goto exit;
2482 		default:
2483 			dev_err(&cq->hwq.pdev->dev,
2484 				"QPLIB: process_cq unknown type 0x%lx",
2485 				hw_cqe->cqe_type_toggle &
2486 				CQ_BASE_CQE_TYPE_MASK);
2487 			rc = -EINVAL;
2488 			break;
2489 		}
2490 		if (rc < 0) {
2491 			if (rc == -EAGAIN)
2492 				break;
2493 			/* Error while processing the CQE, just skip to the
2494 			 * next one
2495 			 */
2496 			dev_err(&cq->hwq.pdev->dev,
2497 				"QPLIB: process_cqe error rc = 0x%x", rc);
2498 		}
2499 		raw_cons++;
2500 	}
2501 	if (cq->hwq.cons != raw_cons) {
2502 		cq->hwq.cons = raw_cons;
2503 		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
2504 	}
2505 exit:
2506 	spin_unlock_irqrestore(&cq->hwq.lock, flags);
2507 	return num_cqes - budget;
2508 }
2509 
2510 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2511 {
2512 	unsigned long flags;
2513 
2514 	spin_lock_irqsave(&cq->hwq.lock, flags);
2515 	if (arm_type)
2516 		bnxt_qplib_arm_cq(cq, arm_type);
2517 	/* Using cq->arm_state variable to track whether to issue cq handler */
2518 	atomic_set(&cq->arm_state, 1);
2519 	spin_unlock_irqrestore(&cq->hwq.lock, flags);
2520 }
2521