/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

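/*
 * Flush list handling, a sketch of the life cycle as used in this file:
 * when a QP enters the error state its outstanding work is completed
 * from software rather than hardware.  bnxt_qplib_add_flush_qp() queues
 * the SQ and RQ on the send/recv CQ flush lists, the CQ poll code later
 * drains them as flushed completions, and bnxt_qplib_clean_qp() scrubs
 * any stale CQEs and unlinks the QP again.
 */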
/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

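/*
 * The two CQ flush locks are always taken in scq-then-rcq order so that
 * concurrent callers cannot deadlock.  When the SQ and RQ share a single
 * CQ, the second lock is only "taken" via __acquire()/__release() to
 * keep sparse's context tracking balanced without double-locking.
 */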
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s: Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->hwq.max_elements * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->hwq.max_elements * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->hwq.max_elements *
					qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->hwq.max_elements *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

static void bnxt_qplib_service_nq(unsigned long data)
{
	struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	struct bnxt_qplib_cq *cq;
	int num_cqne_processed = 0;
	int num_srqne_processed = 0;
	u32 sw_cons, raw_cons;
	u16 type;
	int budget = nq->budget;
	uintptr_t q_handle;
	bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);

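	/*
	 * NQE consumption model (a sketch): hardware does not export a
	 * producer index; instead the valid bit in each NQE flips on every
	 * pass through the ring, so NQE_CMP_VALID() derives the expected
	 * phase from raw_cons and max_elements to detect new entries.
	 */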
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			bnxt_qplib_arm_cq_enable(cq);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (!nq->cqn_handler(nq, cq))
				num_cqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle,
					   DBC_DBC_TYPE_SRQ_ARMENA);
			if (!nq->srqn_handler(nq,
					      (struct bnxt_qplib_srq *)q_handle,
					      nqsrqe->event))
				num_srqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		raw_cons++;
	}
	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, hwq->cons,
					    hwq->max_elements, nq->ring_id,
					    gen_p5);
	}
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base **nq_ptr;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
	prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->worker);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);

	tasklet_disable(&nq->worker);
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(nq->bar_reg_iomem, nq->hwq.cons,
			      nq->hwq.max_elements, nq->ring_id, gen_p5);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->vector);
	if (kill)
		tasklet_kill(&nq->worker);
	if (nq->requested) {
		irq_set_affinity_hint(nq->vector, NULL);
		free_irq(nq->vector, nq);
		nq->requested = false;
	}
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	if (nq->requested)
		bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->bar_reg_iomem)
		iounmap(nq->bar_reg_iomem);
	nq->bar_reg_iomem = NULL;

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->vector = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->vector = msix_vector;
	if (need_init)
		tasklet_init(&nq->worker, bnxt_qplib_service_nq,
			     (unsigned long)nq);
	else
		tasklet_enable(&nq->worker);

	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc)
		return rc;

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->vector, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->vector, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, nq->hwq.cons,
				    nq->hwq.max_elements, nq->ring_id, gen_p5);

	return rc;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
					    struct bnxt_qplib_cq *),
			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
					     struct bnxt_qplib_srq *,
					     u8 event))
{
	resource_size_t nq_base;
	int rc = -1;

	if (cqn_handler)
		nq->cqn_handler = cqn_handler;

	if (srqn_handler)
		nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
	nq->bar_reg_off = bar_reg_offset;
	nq_base = pci_resource_start(pdev, nq->bar_reg);
	if (!nq_base) {
		rc = -ENOMEM;
		goto fail;
	}
	/* Unconditionally map 8 bytes to support 57500 series */
	nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 8);
	if (!nq->bar_reg_iomem) {
		rc = -ENOMEM;
		goto fail;
	}

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
{
	u8 hwq_type;

	nq->pdev = pdev;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
	hwq_type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
				      &nq->hwq.max_elements,
				      BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
				      PAGE_SIZE, hwq_type))
		return -ENOMEM;

	nq->budget = 8;
	return 0;
}

/* SRQ */
static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	void __iomem *db;
	u32 sw_prod;
	u64 val = 0;

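	/*
	 * 64-bit doorbell layout assumed by the composition below:
	 * the high 32 bits carry the arm type and the SRQ id (xid),
	 * the low 32 bits carry the queue index - the producer offset,
	 * or the arm threshold for DBC_DBC_TYPE_SRQ_ARM.
	 */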
	/* Ring DB */
	sw_prod = (arm_type == DBC_DBC_TYPE_SRQ_ARM) ?
		   srq->threshold : HWQ_CMP(srq_hwq->prod, srq_hwq);
	db = (arm_type == DBC_DBC_TYPE_SRQ_ARMENA) ? srq->dbr_base :
						     srq->dpi->dbr;
	val = ((srq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
	val <<= 32;
	val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
	writeq(val, db);
}

int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_srq req;
	struct creq_destroy_srq_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;

	bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
	kfree(srq->swq);
	return 0;
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_create_srq req;
	struct creq_create_srq_resp resp;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	int rc, idx;

	srq->hwq.max_elements = srq->max_wqe;
	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, srq->sglist,
				       srq->nmap, &srq->hwq.max_elements,
				       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_QUEUE);
	if (rc)
		goto exit;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}

	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
				      CMDQ_CREATE_SRQ_LVL_MASK) <<
				      CMDQ_CREATE_SRQ_LVL_SFT) |
				      (pbl->pg_size == ROCE_PG_SIZE_4K ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
				       pbl->pg_size == ROCE_PG_SIZE_8K ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
				       pbl->pg_size == ROCE_PG_SIZE_64K ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
				       pbl->pg_size == ROCE_PG_SIZE_2M ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
				       pbl->pg_size == ROCE_PG_SIZE_8M ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
				       pbl->pg_size == ROCE_PG_SIZE_1G ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

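	/* Chain the SWQ entries into a simple free list: start_idx is the
	 * head and a next_idx of -1 terminates it.  post_srq_recv() pops
	 * entries from the head; the completion path is expected to return
	 * them (that code is outside this section).
	 */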
	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
	if (srq->threshold)
		bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
	kfree(srq->swq);
exit:
	return rc;
}

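/*
 * SRQ limit (threshold) arming: the SRQ is armed immediately only while
 * it still holds more than 'threshold' entries; otherwise arming is
 * deferred via arm_req so that bnxt_qplib_post_srq_recv() arms once
 * enough buffers have been replenished.
 */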
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 sw_prod, sw_cons, count = 0;

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);

	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_srq req;
	struct creq_query_srq_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_srq_resp_sb *sb;
	u16 cmd_flags = 0;
	int rc = 0;

	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
	req.srq_cid = cpu_to_le32(srq->id);

	/* Configure the request */
	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	srq->threshold = le16_to_cpu(sb->srq_limit);
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);

	return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe, **srqe_ptr;
	struct sq_sge *hw_sge;
	u32 sw_prod, sw_cons, count = 0;
	int i, rc = 0, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		rc = -EINVAL;
		spin_unlock(&srq_hwq->lock);
		goto done;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
	srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
	memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	srq_hwq->prod++;

	spin_lock(&srq_hwq->lock);
	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	/* The lock is held here only to get a consistent read of
	 * srq_hwq->cons for the occupancy calculation below.
	 */
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM);
	}
done:
	return rc;
}

/* QP */
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_create_qp1 req;
	struct creq_create_qp1_resp resp;
	struct bnxt_qplib_pbl *pbl;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	int rc;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;

	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	sq->hwq.max_elements = sq->max_wqe;
	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
				       &sq->hwq.max_elements,
				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_QUEUE);
	if (rc)
		goto exit;

	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
	if (!sq->swq) {
		rc = -ENOMEM;
		goto fail_sq;
	}
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.sq_pg_size_sq_lvl =
		((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
				<<  CMDQ_CREATE_QP1_SQ_LVL_SFT) |
		(pbl->pg_size == ROCE_PG_SIZE_4K ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
		 pbl->pg_size == ROCE_PG_SIZE_8K ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
		 pbl->pg_size == ROCE_PG_SIZE_64K ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
		 pbl->pg_size == ROCE_PG_SIZE_2M ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
		 pbl->pg_size == ROCE_PG_SIZE_8M ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
		 pbl->pg_size == ROCE_PG_SIZE_1G ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
		 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);

	if (qp->scq)
		req.scq_cid = cpu_to_le32(qp->scq->id);

	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;

	/* RQ */
	if (rq->max_wqe) {
		rq->hwq.max_elements = qp->rq.max_wqe;
		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
					       &rq->hwq.max_elements,
					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
					       PAGE_SIZE, HWQ_TYPE_QUEUE);
		if (rc)
			goto fail_sq;

		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
				  GFP_KERNEL);
		if (!rq->swq) {
			rc = -ENOMEM;
			goto fail_rq;
		}
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		req.rq_pg_size_rq_lvl =
			((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
			 CMDQ_CREATE_QP1_RQ_LVL_SFT) |
				(pbl->pg_size == ROCE_PG_SIZE_4K ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
				 pbl->pg_size == ROCE_PG_SIZE_8K ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
				 pbl->pg_size == ROCE_PG_SIZE_64K ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
				 pbl->pg_size == ROCE_PG_SIZE_2M ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
				 pbl->pg_size == ROCE_PG_SIZE_8M ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
				 pbl->pg_size == ROCE_PG_SIZE_1G ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
				 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
		if (qp->rcq)
			req.rcq_cid = cpu_to_le32(qp->rcq->id);
	}

	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc)
		goto fail;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
	req.rq_size = cpu_to_le32(rq->hwq.max_elements);

	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.rq_fwo_rq_sge =
		cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_RQ_SGE_SFT);

	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	rcfw->qp_tbl[qp->id].qp_id = qp->id;
	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
	kfree(rq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
	kfree(sq->swq);
exit:
	return rc;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	unsigned long psn_search, poff = 0;
	struct sq_psn_search **psn_search_ptr;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	int i, rc, req_size, psn_sz = 0;
	struct sq_send **hw_sq_send_ptr;
	struct creq_create_qp_resp resp;
	struct bnxt_qplib_hwq *xrrq;
	u16 cmd_flags = 0, max_ssge;
	struct cmdq_create_qp req;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u16 max_rsge;

	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
	}
	sq->hwq.max_elements = sq->max_wqe;
	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
				       sq->nmap, &sq->hwq.max_elements,
				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
				       psn_sz,
				       PAGE_SIZE, HWQ_TYPE_QUEUE);
	if (rc)
		goto exit;

	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
	if (!sq->swq) {
		rc = -ENOMEM;
		goto fail_sq;
	}
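	/*
	 * For RC QPs the PSN search entries share the SQ's HWQ allocation
	 * and start right after the last SQE page (psn_sz was passed to
	 * bnxt_qplib_alloc_init_hwq() above); the pointer arithmetic below
	 * locates that region and compensates when it is not page aligned.
	 */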
	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
	if (psn_sz) {
		psn_search_ptr = (struct sq_psn_search **)
				  &hw_sq_send_ptr[get_sqe_pg
					(sq->hwq.max_elements)];
		psn_search = (unsigned long)
			      &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
			      [get_sqe_idx(sq->hwq.max_elements)];
		if (psn_search & ~PAGE_MASK) {
			/* If the psn_search does not start on a page boundary,
			 * then calculate the offset
			 */
			poff = (psn_search & ~PAGE_MASK) /
				BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
		}
		for (i = 0; i < sq->hwq.max_elements; i++) {
			sq->swq[i].psn_search =
				&psn_search_ptr[get_psne_pg(i + poff)]
					       [get_psne_idx(i + poff)];
			/* psn_ext will be used only on P5 chips */
			sq->swq[i].psn_ext =
				(struct sq_psn_search_ext *)
				&psn_search_ptr[get_psne_pg(i + poff)]
					       [get_psne_idx(i + poff)];
		}
	}
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.sq_pg_size_sq_lvl =
		((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
				 <<  CMDQ_CREATE_QP_SQ_LVL_SFT) |
		(pbl->pg_size == ROCE_PG_SIZE_4K ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
		 pbl->pg_size == ROCE_PG_SIZE_8K ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
		 pbl->pg_size == ROCE_PG_SIZE_64K ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
		 pbl->pg_size == ROCE_PG_SIZE_2M ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
		 pbl->pg_size == ROCE_PG_SIZE_8M ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
		 pbl->pg_size == ROCE_PG_SIZE_1G ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
		 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);

	if (qp->scq)
		req.scq_cid = cpu_to_le32(qp->scq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;

	/* RQ */
	if (rq->max_wqe) {
		rq->hwq.max_elements = rq->max_wqe;
		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
					       rq->nmap, &rq->hwq.max_elements,
					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
					       PAGE_SIZE, HWQ_TYPE_QUEUE);
		if (rc)
			goto fail_sq;

		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
				  GFP_KERNEL);
		if (!rq->swq) {
			rc = -ENOMEM;
			goto fail_rq;
		}
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		req.rq_pg_size_rq_lvl =
			((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
			 CMDQ_CREATE_QP_RQ_LVL_SFT) |
				(pbl->pg_size == ROCE_PG_SIZE_4K ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
				 pbl->pg_size == ROCE_PG_SIZE_8K ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
				 pbl->pg_size == ROCE_PG_SIZE_64K ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
				 pbl->pg_size == ROCE_PG_SIZE_2M ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
				 pbl->pg_size == ROCE_PG_SIZE_8M ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
				 pbl->pg_size == ROCE_PG_SIZE_1G ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
				 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
	} else {
		/* SRQ */
		if (qp->srq) {
			qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
			req.srq_cid = cpu_to_le32(qp->srq->id);
		}
	}

	if (qp->rcq)
		req.rcq_cid = cpu_to_le32(qp->rcq->id);
	req.qp_flags = cpu_to_le32(qp_flags);
	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf = NULL;

	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc)
		goto fail_rq;

	/* CTRL-22434: Irrespective of the requested SGE count on the SQ,
	 * always create the QP with the maximum possible send SGEs if the
	 * requested inline size is greater than 0.
	 */
	max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
	req.sq_fwo_sq_sge = cpu_to_le16(
				((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
				 << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	max_rsge = bnxt_qplib_is_chip_gen_p5(res->cctx) ? 6 : rq->max_sge;
	req.rq_fwo_rq_sge = cpu_to_le16(
				((max_rsge & CMDQ_CREATE_QP_RQ_SGE_MASK)
				 << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
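	/* ORRQ/IRRQ are needed only when psn_sz is nonzero, i.e. for RC
	 * QPs; their byte sizes are rounded up to whole pages below
	 * before the context memory is allocated.
	 */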
	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
					       &xrrq->max_elements,
					       BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
					       0, req_size, HWQ_TYPE_CTX);
		if (rc)
			goto fail_buf_free;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);

		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
					       &xrrq->max_elements,
					       BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
					       0, req_size, HWQ_TYPE_CTX);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	rcfw->qp_tbl[qp->id].qp_id = qp->id;
	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;

	return 0;

fail:
	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
fail_orrq:
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
fail_buf_free:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
	kfree(rq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
	kfree(sq->swq);
exit:
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}
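/*
 * __filter_modify_flags() below prunes the caller-supplied modify mask
 * to what the firmware will accept for the current state before the
 * transition, mirroring the per-state restrictions handled above.
 */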
static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_modify_qp req;
	struct creq_modify_qp_resp resp;
	u16 cmd_flags = 0, pkey;
	u32 temp32[4];
	u32 bmask;
	int rc;

	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
		if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
					 qp->pkey_index, &pkey))
			req.pkey = cpu_to_le16(pkey);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu = qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_qp req;
	struct creq_query_qp_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_qp_resp_sb *sb;
	u16 cmd_flags = 0;
	u32 temp32[4];
	int i, rc = 0;

	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
				  true : false;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	int i;

	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
		hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_qp req;
	struct creq_destroy_qp_resp resp;
	u16 cmd_flags = 0;
	int rc;

	rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[qp->id].qp_handle = NULL;

	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);

	req.qp_cid = cpu_to_le32(qp->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc) {
		rcfw->qp_tbl[qp->id].qp_id = qp->id;
		rcfw->qp_tbl[qp->id].qp_handle = qp;
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return HWQ_CMP(rq->hwq.prod, &rq->hwq);
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;
	u64 val = 0;

	val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
	       DBC_DBC_TYPE_SQ);
	val <<= 32;
	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
	val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
	/* Flush all the WQE writes to HW */
	writeq(val, qp->dpi->dbr);
}

int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_swq *swq;
	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
	struct sq_sge *hw_sge;
	struct bnxt_qplib_nq_work *nq_work = NULL;
	bool sch_handler = false;
	u32 sw_prod;
	u8 wqe_size16;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	__le32 temp32;

	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
		if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
			sch_handler = true;
			dev_dbg(&sq->hwq.pdev->dev,
				"%s: Error QP. Scheduling for poll_cq\n",
				__func__);
			goto queue_err;
		}
	}

	if (bnxt_qplib_queue_full(sq)) {
		dev_err(&sq->hwq.pdev->dev,
			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
			sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
			sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}
	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
	swq = &sq->swq[sw_prod];
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
	swq->start_psn = sq->psn & BTH_PSN_MASK;

	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
	hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
					[get_sqe_idx(sw_prod)];

	memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		/* Copy the inline data */
		if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
			dev_warn(&sq->hwq.pdev->dev,
				 "Inline data length > 96 detected\n");
			data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
		} else {
			data_len = wqe->inline_len;
		}
		memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
		wqe_size16 = (data_len + 15) >> 4;
	} else {
		for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
		     i < wqe->num_sge; i++, hw_sge++) {
			hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
			hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
			hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
			data_len += wqe->sg_list[i].size;
		}
		/* Each SGE entry = 1 WQE size16 */
		wqe_size16 = wqe->num_sge;
		/* HW requires the wqe size to have room for at least one SGE
		 * even if none was supplied by the ULP
		 */
		if (!wqe->num_sge)
			wqe_size16++;
	}

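	/*
	 * PSN accounting for the opcode-specific cases below (a sketch):
	 * each WQE consumes one PSN per MTU-sized packet it generates,
	 * i.e. pkt_num = DIV_ROUND_UP(data_len, qp->mtu), which is what
	 * the open-coded (data_len + qp->mtu - 1) / qp->mtu computes.
	 */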
1597 	/* Specifics */
1598 	switch (wqe->type) {
1599 	case BNXT_QPLIB_SWQE_TYPE_SEND:
1600 		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1601 			/* Assemble info for Raw Ethertype QPs */
1602 			struct sq_send_raweth_qp1 *sqe =
1603 				(struct sq_send_raweth_qp1 *)hw_sq_send_hdr;
1604 
1605 			sqe->wqe_type = wqe->type;
1606 			sqe->flags = wqe->flags;
1607 			sqe->wqe_size = wqe_size16 +
1608 				((offsetof(typeof(*sqe), data) + 15) >> 4);
1609 			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1610 			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1611 			sqe->length = cpu_to_le32(data_len);
1612 			sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1613 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1614 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1615 
1616 			break;
1617 		}
1618 		/* fall thru */
1619 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1620 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1621 	{
1622 		struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;
1623 
1624 		sqe->wqe_type = wqe->type;
1625 		sqe->flags = wqe->flags;
1626 		sqe->wqe_size = wqe_size16 +
1627 				((offsetof(typeof(*sqe), data) + 15) >> 4);
1628 		sqe->inv_key_or_imm_data = cpu_to_le32(
1629 						wqe->send.inv_key);
1630 		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1631 		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->dst_qp = cpu_to_le32(
					wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
			sqe->length = cpu_to_le32(data_len);
			sqe->avid = cpu_to_le32(wqe->send.avid &
						SQ_SEND_AVID_MASK);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
		} else {
			sqe->length = cpu_to_le32(data_len);
			sqe->dst_qp = 0;
			sqe->avid = 0;
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe =
				(struct sq_localinvalidate *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
					SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		sqe->va = cpu_to_le64(wqe->frmr.va);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		sqe->va = cpu_to_le64(wqe->bind.va);
		temp32 = cpu_to_le32(wqe->bind.length);
		memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	swq->next_psn = sq->psn & BTH_PSN_MASK;
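	/*
	 * Publish the opcode and the start/next PSN of this WQE in the PSN
	 * search area so that the HW can map a PSN back to its WQE (e.g. on
	 * retransmit). Gen P5 chips use the extended psn_ext layout; older
	 * chips use psn_search.
	 */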
	if (swq->psn_search) {
		u32 opcd_spsn;
		u32 flg_npsn;

		opcd_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
			      SQ_PSN_SEARCH_START_PSN_MASK);
		opcd_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
			       SQ_PSN_SEARCH_OPCODE_MASK);
		flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
			     SQ_PSN_SEARCH_NEXT_PSN_MASK);
		if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
			swq->psn_ext->opcode_start_psn =
						cpu_to_le32(opcd_spsn);
			swq->psn_ext->flags_next_psn =
						cpu_to_le32(flg_npsn);
		} else {
			swq->psn_search->opcode_start_psn =
						cpu_to_le32(opcd_spsn);
			swq->psn_search->flags_next_psn =
						cpu_to_le32(flg_npsn);
		}
	}
queue_err:
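	/*
	 * For a QP already in the error state (sch_handler), no WQE was
	 * DMA'd to the HW queue; only the ULP bookkeeping is stored in the
	 * swq so that poll_cq can later complete it as flushed via the
	 * nq_work scheduled below.
	 */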
	if (sch_handler) {
		/* Store the ULP info in the software structures */
		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
		swq = &sq->swq[sw_prod];
		swq->wr_id = wqe->wr_id;
		swq->type = wqe->type;
		swq->flags = wqe->flags;
		if (qp->sig_type)
			swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
		swq->start_psn = sq->psn & BTH_PSN_MASK;
	}
	sq->hwq.prod++;
	qp->wqe_cnt++;

done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&sq->hwq.pdev->dev,
				"FP: Failed to allocate SQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}

void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;
	u64 val = 0;

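	/*
	 * Compose the 64-bit RQ doorbell: doorbell type and QP id (xid) in
	 * the upper 32 bits, RQ producer index in the lower bits, per the
	 * DBC_DBC_* shifts and masks.
	 */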
	val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
	       DBC_DBC_TYPE_RQ);
	val <<= 32;
	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
	/* Flush the writes to the HW Rx WQE before ringing the Rx DB */
	wmb();
	writeq(val, qp->dpi->dbr);
}

int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe *rqe, **rqe_ptr;
	struct sq_sge *hw_sge;
	struct bnxt_qplib_nq_work *nq_work = NULL;
	bool sch_handler = false;
	u32 sw_prod;
	int i, rc = 0;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&rq->hwq.pdev->dev,
			"%s: Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}
	if (bnxt_qplib_queue_full(rq)) {
		dev_err(&rq->hwq.pdev->dev,
			"FP: QP (0x%x) RQ is full!\n", qp->id);
		rc = -EINVAL;
		goto done;
	}
	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	rq->swq[sw_prod].wr_id = wqe->wr_id;

	rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
	rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];

	memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);

	/* Fill the SGE list from the ULP's sg_list */
	for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	rqe->wqe_type = wqe->type;
	rqe->flags = wqe->flags;
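	/* wqe_size is in 16-byte units: the RQE header rounded up to the
	 * next unit, plus one unit per SGE
	 */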
	rqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*rqe), data) + 15) >> 4);
	/* HW requires the wqe size to have room for at least one SGE even
	 * if none was supplied by the ULP
	 */
	if (!wqe->num_sge)
		rqe->wqe_size++;

	/* Supply the rqe->wr_id index to the wr_id_tbl for now */
	rqe->wr_id[0] = cpu_to_le32(sw_prod);

queue_err:
	if (sch_handler) {
		/* Store the ULP info in the software structures */
		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
		rq->swq[sw_prod].wr_id = wqe->wr_id;
	}

	rq->hwq.prod++;
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&rq->hwq.pdev->dev,
				"FP: Failed to allocate RQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
done:
	return rc;
}

/* CQ */

/* Spinlock must be held */
static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
{
	u64 val = 0;

	val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
	       DBC_DBC_TYPE_CQ_ARMENA;
	val <<= 32;
	/* Flush memory writes before enabling the CQ */
	wmb();
	writeq(val, cq->dbr_base);
}

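/*
 * Ring the CQ doorbell with the current consumer index. arm_type selects
 * the requested mode, e.g. DBC_DBC_TYPE_CQ_ARMALL to raise an event on the
 * next CQE of any kind, or DBC_DBC_TYPE_CQ_ARMSE for solicited events only.
 */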
static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	u32 sw_cons;
	u64 val = 0;

	/* Ring DB */
	val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
	val <<= 32;
	sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
	val |= (sw_cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
	/* Flush memory writes before arming the CQ */
	wmb();
	writeq(val, cq->dpi->dbr);
}

int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_create_cq req;
	struct creq_create_cq_resp resp;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	int rc;

	cq->hwq.max_elements = cq->max_wqe;
	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
				       cq->nmap, &cq->hwq.max_elements,
				       BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_QUEUE);
	if (rc)
		goto exit;

	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"FP: CREATE_CQ failed due to NULL DPI\n");
		rc = -EINVAL;
		goto fail;
	}
	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);

	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
	pbl = &cq->hwq.pbl[PBL_LVL_0];
	req.pg_size_lvl = cpu_to_le32(
	    ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
						CMDQ_CREATE_CQ_LVL_SFT) |
	    (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
	     pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
	     pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
	     pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
	     pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
	     pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
	     CMDQ_CREATE_CQ_PG_SIZE_PG_4K));

	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);

	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);
	spin_lock_init(&cq->flush_lock);

	bnxt_qplib_arm_cq_enable(cq);
	return 0;

fail:
	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
exit:
	return rc;
}

int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_cq req;
	struct creq_destroy_cq_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);

	req.cq_cid = cpu_to_le32(cq->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
	return 0;
}

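/*
 * __flush_sq()/__flush_rq() fabricate FLUSHED_ERR completions for every
 * WQE still outstanding on the queue, bounded by the caller's budget.
 * Both are invoked from bnxt_qplib_process_flush_list() with the CQ
 * flush_lock held.
 */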
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	u32 sw_prod, sw_cons;
	struct bnxt_qplib_cqe *cqe;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_cons == sw_prod)
			break;
		/* Skip the FENCE WQE completions */
		if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[sw_cons].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[sw_cons].type;
		cqe++;
		(*budget)--;
skip_compl:
		sq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 sw_prod, sw_cons;
	int rc = 0;
	int opcode = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
	case CMDQ_CREATE_QP_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
		if (sw_cons == sw_prod)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status =
		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (unsigned long)qp;
		cqe->wr_id = rq->swq[sw_cons].wr_id;
		cqe++;
		(*budget)--;
		rq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}

/* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive).
 *       CQEs are tracked from sw_cq_cons to max_element, but are valid
 *       only if VALID == 1.
 */
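/*
 * WA 9060: when a marked psn_search entry is seen, completion of that SQE
 * is deferred (sq->condition) until the phantom FENCE CQE shows up in the
 * CQ, after which exactly one more SQE is completed (sq->single). The loop
 * below peeks ahead in the CQ without consuming any entries.
 */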
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_swq *swq;
	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
	struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	int i, rc = 0;

	/* Normal mode */
	/* Check for the psn_search marking before completing */
	swq = &sq->swq[sw_sq_cons];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
				     & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ_ARMALL);

		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_raw_cq_cons = cq->hwq.cons;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
			peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
			peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
						     [CQE_IDX(peek_sw_cq_cons)];
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
					  cq->hwq.max_elements)) {
				/*
				 * The valid test of the entry must be done
				 * first before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				    CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)
						 le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						HWQ_CMP(le16_to_cpu(
						peek_req_hwcqe->sq_cons_idx) - 1,
						&sq->hwq);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 * Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = -EINVAL;
				goto out;
			}
			peek_sw_cq_cons++;
			peek_raw_cq_cons++;
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}

static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	struct bnxt_qplib_cqe *cqe;
	u32 sw_sq_cons, cqe_sq_cons;
	struct bnxt_qplib_swq *swq;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
	if (cqe_sq_cons > sq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process req reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
			cqe_sq_cons, sq->hwq.max_elements);
		return -EINVAL;
	}

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	/* We must walk the SQ's swq and fabricate CQEs for all previously
	 * signaled SWQEs (aggregated by the HW into this CQE) from the
	 * current SQ cons up to cqe_sq_cons.
	 */
	cqe = *pcqe;
	while (*budget) {
		sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_sq_cons == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sw_sq_cons];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status.  For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sw_sq_cons, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				/* Before we complete, do WA 9060 */
				if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
					      cqe_sq_cons)) {
					*lib_qp = qp;
					goto out;
				}
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		sq->hwq.cons++;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}

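/* Return an SRQE tag to the tail of the SRQ free list so the slot can be
 * reposted; hwq.cons is advanced purely as an SRQE accounting counter.
 */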
static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	srq->hwq.cons++; /* Support for SRQE counter */
	spin_unlock(&srq->hwq.lock);
}

static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		rq = &qp->rq;
		if (wr_id_idx >= rq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
		cqe++;
		(*budget)--;
		rq->hwq.cons++;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return rc;
}

static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = (u32)le16_to_cpu(hwcqe->length);
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	/* FIXME: Endianness fix needed for smac */
	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
				  ((le32_to_cpu(
				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		rq = &qp->rq;
		if (wr_id_idx >= rq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->hwq.max_elements);
			return -EINVAL;
		}

		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
		cqe++;
		(*budget)--;
		rq->hwq.cons++;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}
done:
	return rc;
}

bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	u32 sw_cons, raw_cons;
	bool rc = true;

	raw_cons = cq->hwq.cons;
	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
	hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];

	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
	return rc;
}

static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, ETH_ALEN);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		rq = &qp->rq;
		if (wr_id_idx >= rq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
		cqe++;
		(*budget)--;
		rq->hwq.cons++;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return rc;
}

static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 sw_cons = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process terminal qp is NULL\n");
		return -EINVAL;
	}

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;

	if (cqe_cons > sq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process terminal reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
			cqe_cons, sq->hwq.max_elements);
		goto do_rq;
	}

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* Terminal CQE can also include aggregated successful CQEs prior.
	 * So we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_cons == cqe_cons)
			break;
		if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[sw_cons].wr_id;
			cqe->type = sq->swq[sw_cons].type;
			cqe++;
			(*budget)--;
		}
		sq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!(*budget) && sw_cons != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->hwq.max_elements);
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* Terminal CQE requires that all posted RQEs complete with
	 * FLUSHED_ERR from the current rq->cons up to rq->prod, regardless
	 * of the rq_cons_idx the terminal CQE indicates
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}

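/*
 * A cutoff CQE marks the last CQE delivered on the old CQ during a CQ
 * resize operation; clear the in-progress flag and wake up any waiter.
 */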
static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}

int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	int budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}

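/*
 * Poll up to num_cqes completions into the cqe array and return the number
 * consumed. When the WA 9060 scan defers a completion, *lib_qp is set to
 * the affected QP so the caller can trigger the phantom handling.
 */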
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	u32 sw_cons, raw_cons;
	int budget, rc = 0;

	raw_cons = cq->hwq.cons;
	budget = num_cqes;

	while (budget) {
		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
		hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
		hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		/* From the device's respective CQE format to qplib_wc */
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       sw_cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%lx\n",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			dev_err(&cq->hwq.pdev->dev,
				"process_cqe error rc = 0x%x\n", rc);
		}
		raw_cons++;
	}
	if (cq->hwq.cons != raw_cons) {
		cq->hwq.cons = raw_cons;
		bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ);
	}
exit:
	return num_cqes - budget;
}

void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	if (arm_type)
		bnxt_qplib_arm_cq(cq, arm_type);
	/* Using cq->arm_state variable to track whether to issue cq handler */
	atomic_set(&cq->arm_state, 1);
}

void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}
2836