xref: /openbmc/linux/drivers/infiniband/hw/bnxt_re/qplib_fp.c (revision 9977a8c3497a8f7f7f951994f298a8e4d961234f)
1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: Fast Path Operators
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/spinlock.h>
41 #include <linux/sched.h>
42 #include <linux/slab.h>
43 #include <linux/pci.h>
44 #include <linux/prefetch.h>
45 
46 #include "roce_hsi.h"
47 
48 #include "qplib_res.h"
49 #include "qplib_rcfw.h"
50 #include "qplib_sp.h"
51 #include "qplib_fp.h"
52 
53 static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
54 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
55 static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type);
56 
57 static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
58 {
59 	qp->sq.condition = false;
60 	qp->sq.send_phantom = false;
61 	qp->sq.single = false;
62 }
63 
64 /* Flush list */
65 static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
66 {
67 	struct bnxt_qplib_cq *scq, *rcq;
68 
69 	scq = qp->scq;
70 	rcq = qp->rcq;
71 
72 	if (!qp->sq.flushed) {
73 		dev_dbg(&scq->hwq.pdev->dev,
74 			"QPLIB: FP: Adding to SQ Flush list = %p",
75 			qp);
76 		bnxt_qplib_cancel_phantom_processing(qp);
77 		list_add_tail(&qp->sq_flush, &scq->sqf_head);
78 		qp->sq.flushed = true;
79 	}
80 	if (!qp->srq) {
81 		if (!qp->rq.flushed) {
82 			dev_dbg(&rcq->hwq.pdev->dev,
83 				"QPLIB: FP: Adding to RQ Flush list = %p",
84 				qp);
85 			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
86 			qp->rq.flushed = true;
87 		}
88 	}
89 }
90 
91 void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
92 				 unsigned long *flags)
93 	__acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
94 {
95 	spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
96 	if (qp->scq == qp->rcq)
97 		__acquire(&qp->rcq->hwq.lock);
98 	else
99 		spin_lock(&qp->rcq->hwq.lock);
100 }
101 
102 void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
103 				 unsigned long *flags)
104 	__releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
105 {
106 	if (qp->scq == qp->rcq)
107 		__release(&qp->rcq->hwq.lock);
108 	else
109 		spin_unlock(&qp->rcq->hwq.lock);
110 	spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
111 }
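
/*
 * Usage sketch (illustrative only, not part of the driver): callers that
 * need both CQ locks use the acquire/release pair above so that the
 * shared-CQ case (scq == rcq) takes the spinlock only once:
 *
 *	unsigned long flags;
 *
 *	bnxt_qplib_acquire_cq_locks(qp, &flags);
 *	// ... manipulate state spanning both CQs, e.g. the flush lists ...
 *	bnxt_qplib_release_cq_locks(qp, &flags);
 *
 * bnxt_qplib_add_flush_qp() below is the in-tree instance of this pattern.
 */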
112 
113 static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
114 						      struct bnxt_qplib_cq *cq)
115 {
116 	struct bnxt_qplib_cq *buddy_cq = NULL;
117 
118 	if (qp->scq == qp->rcq)
119 		buddy_cq = NULL;
120 	else if (qp->scq == cq)
121 		buddy_cq = qp->rcq;
122 	else
123 		buddy_cq = qp->scq;
124 	return buddy_cq;
125 }
126 
127 static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
128 				     struct bnxt_qplib_cq *cq)
129 	__acquires(&buddy_cq->hwq.lock)
130 {
131 	struct bnxt_qplib_cq *buddy_cq = NULL;
132 
133 	buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
134 	if (!buddy_cq)
135 		__acquire(&cq->hwq.lock);
136 	else
137 		spin_lock(&buddy_cq->hwq.lock);
138 }
139 
140 static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
141 				       struct bnxt_qplib_cq *cq)
142 	__releases(&buddy_cq->hwq.lock)
143 {
144 	struct bnxt_qplib_cq *buddy_cq = NULL;
145 
146 	buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
147 	if (!buddy_cq)
148 		__release(&cq->hwq.lock);
149 	else
150 		spin_unlock(&buddy_cq->hwq.lock);
151 }
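
/*
 * Lock-ordering note (a sketch inferred from the annotations above, not a
 * stated guarantee): the buddy lock nests under the primary CQ's hwq.lock
 * and degenerates to a sparse-only annotation when scq == rcq:
 *
 *	spin_lock(&cq->hwq.lock);
 *	bnxt_qplib_lock_buddy_cq(qp, cq);
 *	// ... work that must see both CQs quiesced ...
 *	bnxt_qplib_unlock_buddy_cq(qp, cq);
 *	spin_unlock(&cq->hwq.lock);
 */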
152 
153 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
154 {
155 	unsigned long flags;
156 
157 	bnxt_qplib_acquire_cq_locks(qp, &flags);
158 	__bnxt_qplib_add_flush_qp(qp);
159 	bnxt_qplib_release_cq_locks(qp, &flags);
160 }
161 
162 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
163 {
164 	if (qp->sq.flushed) {
165 		qp->sq.flushed = false;
166 		list_del(&qp->sq_flush);
167 	}
168 	if (!qp->srq) {
169 		if (qp->rq.flushed) {
170 			qp->rq.flushed = false;
171 			list_del(&qp->rq_flush);
172 		}
173 	}
174 }
175 
176 void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
177 {
178 	unsigned long flags;
179 
180 	bnxt_qplib_acquire_cq_locks(qp, &flags);
181 	__clean_cq(qp->scq, (u64)(unsigned long)qp);
182 	qp->sq.hwq.prod = 0;
183 	qp->sq.hwq.cons = 0;
184 	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
185 	qp->rq.hwq.prod = 0;
186 	qp->rq.hwq.cons = 0;
187 
188 	__bnxt_qplib_del_flush_qp(qp);
189 	bnxt_qplib_release_cq_locks(qp, &flags);
190 }
191 
192 static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
193 {
194 	struct bnxt_qplib_nq_work *nq_work =
195 			container_of(work, struct bnxt_qplib_nq_work, work);
196 
197 	struct bnxt_qplib_cq *cq = nq_work->cq;
198 	struct bnxt_qplib_nq *nq = nq_work->nq;
199 
200 	if (cq && nq) {
201 		spin_lock_bh(&cq->compl_lock);
202 		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
203 			dev_dbg(&nq->pdev->dev,
204 				"%s: Trigger cq = %p event nq = %p\n",
205 				__func__, cq, nq);
206 			nq->cqn_handler(nq, cq);
207 		}
208 		spin_unlock_bh(&cq->compl_lock);
209 	}
210 	kfree(nq_work);
211 }
212 
213 static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
214 				       struct bnxt_qplib_qp *qp)
215 {
216 	struct bnxt_qplib_q *rq = &qp->rq;
217 	struct bnxt_qplib_q *sq = &qp->sq;
218 
219 	if (qp->rq_hdr_buf)
220 		dma_free_coherent(&res->pdev->dev,
221 				  rq->hwq.max_elements * qp->rq_hdr_buf_size,
222 				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
223 	if (qp->sq_hdr_buf)
224 		dma_free_coherent(&res->pdev->dev,
225 				  sq->hwq.max_elements * qp->sq_hdr_buf_size,
226 				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
227 	qp->rq_hdr_buf = NULL;
228 	qp->sq_hdr_buf = NULL;
229 	qp->rq_hdr_buf_map = 0;
230 	qp->sq_hdr_buf_map = 0;
231 	qp->sq_hdr_buf_size = 0;
232 	qp->rq_hdr_buf_size = 0;
233 }
234 
235 static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
236 				       struct bnxt_qplib_qp *qp)
237 {
238 	struct bnxt_qplib_q *rq = &qp->rq;
239 	struct bnxt_qplib_q *sq = &qp->sq;
240 	int rc = 0;
241 
242 	if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
243 		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
244 					sq->hwq.max_elements *
245 					qp->sq_hdr_buf_size,
246 					&qp->sq_hdr_buf_map, GFP_KERNEL);
247 		if (!qp->sq_hdr_buf) {
248 			rc = -ENOMEM;
249 			dev_err(&res->pdev->dev,
250 				"QPLIB: Failed to create sq_hdr_buf");
251 			goto fail;
252 		}
253 	}
254 
255 	if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
256 		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
257 						    rq->hwq.max_elements *
258 						    qp->rq_hdr_buf_size,
259 						    &qp->rq_hdr_buf_map,
260 						    GFP_KERNEL);
261 		if (!qp->rq_hdr_buf) {
262 			rc = -ENOMEM;
263 			dev_err(&res->pdev->dev,
264 				"QPLIB: Failed to create rq_hdr_buf");
265 			goto fail;
266 		}
267 	}
268 	return 0;
269 
270 fail:
271 	bnxt_qplib_free_qp_hdr_buf(res, qp);
272 	return rc;
273 }
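
/*
 * Sizing sketch: each header buffer is one coherent allocation of
 * max_elements fixed-size slots, so slot 'i' lives at (base + i * size).
 * For example, sq->hwq.max_elements = 256 with a 64-byte sq_hdr_buf_size
 * is a single 16KB allocation, and the slot for producer index sw_prod is
 * found by:
 *
 *	void *buf = qp->sq_hdr_buf + sw_prod * qp->sq_hdr_buf_size;
 *	dma_addr_t map = qp->sq_hdr_buf_map + sw_prod * qp->sq_hdr_buf_size;
 *
 * bnxt_qplib_get_qp1_sq_buf() below performs exactly this arithmetic.
 */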
274 
275 static void bnxt_qplib_service_nq(unsigned long data)
276 {
277 	struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
278 	struct bnxt_qplib_hwq *hwq = &nq->hwq;
279 	struct nq_base *nqe, **nq_ptr;
280 	struct bnxt_qplib_cq *cq;
281 	int num_cqne_processed = 0;
282 	int num_srqne_processed = 0;
283 	u32 sw_cons, raw_cons;
284 	u16 type;
285 	int budget = nq->budget;
286 	u64 q_handle;
287 
288 	/* Service the NQ until empty */
289 	raw_cons = hwq->cons;
290 	while (budget--) {
291 		sw_cons = HWQ_CMP(raw_cons, hwq);
292 		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
293 		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
294 		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
295 			break;
296 
297 		/*
298 		 * The valid test of the entry must be done first before
299 		 * reading any further.
300 		 */
301 		dma_rmb();
302 
303 		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
304 		switch (type) {
305 		case NQ_BASE_TYPE_CQ_NOTIFICATION:
306 		{
307 			struct nq_cn *nqcne = (struct nq_cn *)nqe;
308 
309 			q_handle = le32_to_cpu(nqcne->cq_handle_low);
310 			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
311 						     << 32;
312 			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
313 			bnxt_qplib_arm_cq_enable(cq);
314 			spin_lock_bh(&cq->compl_lock);
315 			atomic_set(&cq->arm_state, 0);
316 			if (!nq->cqn_handler(nq, (cq)))
317 				num_cqne_processed++;
318 			else
319 				dev_warn(&nq->pdev->dev,
320 					 "QPLIB: cqn - type 0x%x not handled",
321 					 type);
322 			spin_unlock_bh(&cq->compl_lock);
323 			break;
324 		}
325 		case NQ_BASE_TYPE_SRQ_EVENT:
326 		{
327 			struct nq_srq_event *nqsrqe =
328 						(struct nq_srq_event *)nqe;
329 
330 			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
331 			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
332 				     << 32;
333 			bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)(unsigned long)q_handle,
334 					   DBR_DBR_TYPE_SRQ_ARMENA);
335 			if (!nq->srqn_handler(nq,
336 					      (struct bnxt_qplib_srq *)(unsigned long)q_handle,
337 					      nqsrqe->event))
338 				num_srqne_processed++;
339 			else
340 				dev_warn(&nq->pdev->dev,
341 					 "QPLIB: SRQ event 0x%x not handled",
342 					 nqsrqe->event);
343 			break;
344 		}
345 		case NQ_BASE_TYPE_DBQ_EVENT:
346 			break;
347 		default:
348 			dev_warn(&nq->pdev->dev,
349 				 "QPLIB: nqe with type = 0x%x not handled",
350 				 type);
351 			break;
352 		}
353 		raw_cons++;
354 	}
355 	if (hwq->cons != raw_cons) {
356 		hwq->cons = raw_cons;
357 		NQ_DB_REARM(nq->bar_reg_iomem, hwq->cons, hwq->max_elements);
358 	}
359 }
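
/*
 * Handle-reassembly sketch: an NQE carries the 64-bit software handle
 * split across two little-endian 32-bit words, so the service loop above
 * rebuilds it before casting back to a pointer:
 *
 *	struct nq_cn *nqcne = (struct nq_cn *)nqe;
 *	u64 q_handle;
 *
 *	q_handle  = le32_to_cpu(nqcne->cq_handle_low);
 *	q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high) << 32;
 *	cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
 *
 * The handle is whatever was programmed at create time (see req.srq_handle
 * and req.qp_handle in the create paths below).
 */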
360 
361 static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
362 {
363 	struct bnxt_qplib_nq *nq = dev_instance;
364 	struct bnxt_qplib_hwq *hwq = &nq->hwq;
365 	struct nq_base **nq_ptr;
366 	u32 sw_cons;
367 
368 	/* Prefetch the NQ element */
369 	sw_cons = HWQ_CMP(hwq->cons, hwq);
370 	nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
371 	prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
372 
373 	/* Fan out to CPU affinitized kthreads? */
374 	tasklet_schedule(&nq->worker);
375 
376 	return IRQ_HANDLED;
377 }
378 
379 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
380 {
381 	if (nq->cqn_wq) {
382 		destroy_workqueue(nq->cqn_wq);
383 		nq->cqn_wq = NULL;
384 	}
385 	/* Make sure the HW is stopped! */
386 	synchronize_irq(nq->vector);
387 	tasklet_disable(&nq->worker);
388 	tasklet_kill(&nq->worker);
389 
390 	if (nq->requested) {
391 		irq_set_affinity_hint(nq->vector, NULL);
392 		free_irq(nq->vector, nq);
393 		nq->requested = false;
394 	}
395 	if (nq->bar_reg_iomem)
396 		iounmap(nq->bar_reg_iomem);
397 	nq->bar_reg_iomem = NULL;
398 
399 	nq->cqn_handler = NULL;
400 	nq->srqn_handler = NULL;
401 	nq->vector = 0;
402 }
403 
404 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
405 			 int nq_idx, int msix_vector, int bar_reg_offset,
406 			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
407 					    struct bnxt_qplib_cq *),
408 			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
409 					     struct bnxt_qplib_srq *,
410 					     u8 event))
411 {
412 	resource_size_t nq_base;
413 	int rc = -ENOMEM;
414 
415 	nq->pdev = pdev;
416 	nq->vector = msix_vector;
417 	if (cqn_handler)
418 		nq->cqn_handler = cqn_handler;
419 
420 	if (srqn_handler)
421 		nq->srqn_handler = srqn_handler;
422 
423 	tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
424 
425 	/* Have a task to schedule CQ notifiers in post send case */
426 	nq->cqn_wq  = create_singlethread_workqueue("bnxt_qplib_nq");
427 	if (!nq->cqn_wq)
428 		goto fail;
429 
430 	nq->requested = false;
431 	memset(nq->name, 0, sizeof(nq->name));
432 	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_idx);
433 	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
434 	if (rc) {
435 		dev_err(&nq->pdev->dev,
436 			"Failed to request IRQ for NQ: %#x", rc);
437 		goto fail;
438 	}
439 
440 	cpumask_clear(&nq->mask);
441 	cpumask_set_cpu(nq_idx, &nq->mask);
442 	rc = irq_set_affinity_hint(nq->vector, &nq->mask);
443 	if (rc) {
444 		dev_warn(&nq->pdev->dev,
445 			 "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
446 			 nq->vector, nq_idx);
447 	}
448 
449 	nq->requested = true;
450 	nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
451 	nq->bar_reg_off = bar_reg_offset;
452 	nq_base = pci_resource_start(pdev, nq->bar_reg);
453 	if (!nq_base) {
454 		rc = -ENOMEM;
455 		goto fail;
456 	}
457 	nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 4);
458 	if (!nq->bar_reg_iomem) {
459 		rc = -ENOMEM;
460 		goto fail;
461 	}
462 	NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
463 
464 	return 0;
465 fail:
466 	bnxt_qplib_disable_nq(nq);
467 	return rc;
468 }
469 
470 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
471 {
472 	if (nq->hwq.max_elements) {
473 		bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
474 		nq->hwq.max_elements = 0;
475 	}
476 }
477 
478 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
479 {
480 	nq->pdev = pdev;
481 	if (!nq->hwq.max_elements ||
482 	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
483 		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
484 
485 	if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
486 				      &nq->hwq.max_elements,
487 				      BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
488 				      PAGE_SIZE, HWQ_TYPE_L2_CMPL))
489 		return -ENOMEM;
490 
491 	nq->budget = 8;
492 	return 0;
493 }
494 
495 /* SRQ */
496 static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
497 {
498 	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
499 	struct dbr_dbr db_msg = { 0 };
500 	void __iomem *db;
501 	u32 sw_prod = 0;
502 
503 	/* Ring DB */
504 	sw_prod = (arm_type == DBR_DBR_TYPE_SRQ_ARM) ? srq->threshold :
505 		   HWQ_CMP(srq_hwq->prod, srq_hwq);
506 	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
507 				   DBR_DBR_INDEX_MASK);
508 	db_msg.type_xid = cpu_to_le32(((srq->id << DBR_DBR_XID_SFT) &
509 					DBR_DBR_XID_MASK) | arm_type);
510 	db = (arm_type == DBR_DBR_TYPE_SRQ_ARMENA) ?
511 		srq->dbr_base : srq->dpi->dbr;
512 	wmb(); /* barrier before db ring */
513 	__iowrite64_copy(db, &db_msg, sizeof(db_msg) / sizeof(u64));
514 }
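
/*
 * Doorbell-layout sketch (as composed above): struct dbr_dbr is two 32-bit
 * words - 'index' holds the queue index and 'type_xid' holds the queue id
 * shifted into the XID field, OR'ed with the doorbell type. For example,
 * with srq->id = 5 and sw_prod = 10 the message would be built as:
 *
 *	db_msg.index = cpu_to_le32((10 << DBR_DBR_INDEX_SFT) &
 *				   DBR_DBR_INDEX_MASK);
 *	db_msg.type_xid = cpu_to_le32(((5 << DBR_DBR_XID_SFT) &
 *				       DBR_DBR_XID_MASK) | DBR_DBR_TYPE_SRQ);
 *
 * The wmb() orders the WQE stores before the MMIO copy that rings the bell.
 */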
515 
516 int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
517 			   struct bnxt_qplib_srq *srq)
518 {
519 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
520 	struct cmdq_destroy_srq req;
521 	struct creq_destroy_srq_resp resp;
522 	u16 cmd_flags = 0;
523 	int rc;
524 
525 	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);
526 
527 	/* Configure the request */
528 	req.srq_cid = cpu_to_le32(srq->id);
529 
530 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
531 					  (void *)&resp, NULL, 0);
532 	if (rc)
533 		return rc;
534 
535 	bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
536 	kfree(srq->swq);
537 	return 0;
538 }
539 
540 int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
541 			  struct bnxt_qplib_srq *srq)
542 {
543 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
544 	struct cmdq_create_srq req;
545 	struct creq_create_srq_resp resp;
546 	struct bnxt_qplib_pbl *pbl;
547 	u16 cmd_flags = 0;
548 	int rc, idx;
549 
550 	srq->hwq.max_elements = srq->max_wqe;
551 	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, srq->sglist,
552 				       srq->nmap, &srq->hwq.max_elements,
553 				       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
554 				       PAGE_SIZE, HWQ_TYPE_QUEUE);
555 	if (rc)
556 		goto exit;
557 
558 	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
559 			   GFP_KERNEL);
560 	if (!srq->swq) {
561 		rc = -ENOMEM;
562 		goto fail;
563 	}
564 
565 	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);
566 
567 	/* Configure the request */
568 	req.dpi = cpu_to_le32(srq->dpi->dpi);
569 	req.srq_handle = cpu_to_le64((uintptr_t)srq);
570 
571 	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
572 	pbl = &srq->hwq.pbl[PBL_LVL_0];
573 	req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
574 				      CMDQ_CREATE_SRQ_LVL_MASK) <<
575 				      CMDQ_CREATE_SRQ_LVL_SFT) |
576 				      (pbl->pg_size == ROCE_PG_SIZE_4K ?
577 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
578 				       pbl->pg_size == ROCE_PG_SIZE_8K ?
579 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
580 				       pbl->pg_size == ROCE_PG_SIZE_64K ?
581 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
582 				       pbl->pg_size == ROCE_PG_SIZE_2M ?
583 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
584 				       pbl->pg_size == ROCE_PG_SIZE_8M ?
585 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
586 				       pbl->pg_size == ROCE_PG_SIZE_1G ?
587 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
588 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
589 	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
590 	req.pd_id = cpu_to_le32(srq->pd->id);
591 	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
592 
593 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
594 					  (void *)&resp, NULL, 0);
595 	if (rc)
596 		goto fail;
597 
598 	spin_lock_init(&srq->lock);
599 	srq->start_idx = 0;
600 	srq->last_idx = srq->hwq.max_elements - 1;
601 	for (idx = 0; idx < srq->hwq.max_elements; idx++)
602 		srq->swq[idx].next_idx = idx + 1;
603 	srq->swq[srq->last_idx].next_idx = -1;
604 
605 	srq->id = le32_to_cpu(resp.xid);
606 	srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
607 	if (srq->threshold)
608 		bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARMENA);
609 	srq->arm_req = false;
610 
611 	return 0;
612 fail:
613 	bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
614 	kfree(srq->swq);
615 exit:
616 	return rc;
617 }
618 
619 int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
620 			  struct bnxt_qplib_srq *srq)
621 {
622 	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
623 	u32 sw_prod, sw_cons, count = 0;
624 
625 	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
626 	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
627 
628 	count = sw_prod > sw_cons ? sw_prod - sw_cons :
629 				    srq_hwq->max_elements - sw_cons + sw_prod;
630 	if (count > srq->threshold) {
631 		srq->arm_req = false;
632 		bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
633 	} else {
634 		/* Deferred arming */
635 		srq->arm_req = true;
636 	}
637 
638 	return 0;
639 }
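
/*
 * Occupancy arithmetic, worked example: the prod/cons difference must
 * account for ring wraparound. With max_elements = 1024, sw_cons = 1000
 * and sw_prod = 10 (producer wrapped), the outstanding-entry count is
 *
 *	count = 1024 - 1000 + 10;	// 34 entries
 *
 * whereas with sw_prod = 100 and sw_cons = 40 it is simply 100 - 40 = 60.
 * The SRQ is armed immediately only while more than 'threshold' entries
 * remain posted; otherwise arming is deferred to the next post_srq_recv.
 */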
640 
641 int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
642 			 struct bnxt_qplib_srq *srq)
643 {
644 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
645 	struct cmdq_query_srq req;
646 	struct creq_query_srq_resp resp;
647 	struct bnxt_qplib_rcfw_sbuf *sbuf;
648 	struct creq_query_srq_resp_sb *sb;
649 	u16 cmd_flags = 0;
650 	int rc = 0;
651 
652 	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
653 	req.srq_cid = cpu_to_le32(srq->id);
654 
655 	/* Configure the request */
656 	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
657 	if (!sbuf)
658 		return -ENOMEM;
659 	sb = sbuf->sb;
660 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
661 					  (void *)sbuf, 0);
662 	srq->threshold = le16_to_cpu(sb->srq_limit);
663 	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
664 
665 	return rc;
666 }
667 
668 int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
669 			     struct bnxt_qplib_swqe *wqe)
670 {
671 	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
672 	struct rq_wqe *srqe, **srqe_ptr;
673 	struct sq_sge *hw_sge;
674 	u32 sw_prod, sw_cons, count = 0;
675 	int i, rc = 0, next;
676 
677 	spin_lock(&srq_hwq->lock);
678 	if (srq->start_idx == srq->last_idx) {
679 		dev_err(&srq_hwq->pdev->dev, "QPLIB: FP: SRQ (0x%x) is full!",
680 			srq->id);
681 		rc = -EINVAL;
682 		spin_unlock(&srq_hwq->lock);
683 		goto done;
684 	}
685 	next = srq->start_idx;
686 	srq->start_idx = srq->swq[next].next_idx;
687 	spin_unlock(&srq_hwq->lock);
688 
689 	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
690 	srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
691 	srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
692 	memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
693 	/* Fill the SGEs and calculate the wqe_size */
694 	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
695 	     i < wqe->num_sge; i++, hw_sge++) {
696 		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
697 		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
698 		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
699 	}
700 	srqe->wqe_type = wqe->type;
701 	srqe->flags = wqe->flags;
702 	srqe->wqe_size = wqe->num_sge +
703 			((offsetof(typeof(*srqe), data) + 15) >> 4);
704 	srqe->wr_id[0] = cpu_to_le32((u32)next);
705 	srq->swq[next].wr_id = wqe->wr_id;
706 
707 	srq_hwq->prod++;
708 
709 	spin_lock(&srq_hwq->lock);
710 	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
711 	/* Retaining srq_hwq->cons for this logic;
712 	 * the lock is actually only required to
713 	 * read srq_hwq->cons.
714 	 */
715 	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
716 	count = sw_prod > sw_cons ? sw_prod - sw_cons :
717 				    srq_hwq->max_elements - sw_cons + sw_prod;
718 	spin_unlock(&srq_hwq->lock);
719 	/* Ring DB */
720 	bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ);
721 	if (srq->arm_req && count > srq->threshold) {
722 		srq->arm_req = false;
723 		bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
724 	}
725 done:
726 	return rc;
727 }
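
/*
 * wqe_size arithmetic sketch: the receive WQE size is expressed in 16-byte
 * units - the SGE count plus the WQE header rounded up to a whole unit.
 * Assuming offsetof(struct rq_wqe, data) == 32 (for illustration) and
 * num_sge = 2:
 *
 *	wqe_size = 2 + ((32 + 15) >> 4);	// 2 + 2 = 4 units = 64 bytes
 *
 * Each struct sq_sge is itself 16 bytes, which is why one SGE maps to
 * exactly one unit.
 */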
728 
729 /* QP */
730 int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
731 {
732 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
733 	struct cmdq_create_qp1 req;
734 	struct creq_create_qp1_resp resp;
735 	struct bnxt_qplib_pbl *pbl;
736 	struct bnxt_qplib_q *sq = &qp->sq;
737 	struct bnxt_qplib_q *rq = &qp->rq;
738 	int rc;
739 	u16 cmd_flags = 0;
740 	u32 qp_flags = 0;
741 
742 	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
743 
744 	/* General */
745 	req.type = qp->type;
746 	req.dpi = cpu_to_le32(qp->dpi->dpi);
747 	req.qp_handle = cpu_to_le64(qp->qp_handle);
748 
749 	/* SQ */
750 	sq->hwq.max_elements = sq->max_wqe;
751 	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
752 				       &sq->hwq.max_elements,
753 				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
754 				       PAGE_SIZE, HWQ_TYPE_QUEUE);
755 	if (rc)
756 		goto exit;
757 
758 	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
759 	if (!sq->swq) {
760 		rc = -ENOMEM;
761 		goto fail_sq;
762 	}
763 	pbl = &sq->hwq.pbl[PBL_LVL_0];
764 	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
765 	req.sq_pg_size_sq_lvl =
766 		((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
767 				<<  CMDQ_CREATE_QP1_SQ_LVL_SFT) |
768 		(pbl->pg_size == ROCE_PG_SIZE_4K ?
769 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
770 		 pbl->pg_size == ROCE_PG_SIZE_8K ?
771 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
772 		 pbl->pg_size == ROCE_PG_SIZE_64K ?
773 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
774 		 pbl->pg_size == ROCE_PG_SIZE_2M ?
775 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
776 		 pbl->pg_size == ROCE_PG_SIZE_8M ?
777 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
778 		 pbl->pg_size == ROCE_PG_SIZE_1G ?
779 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
780 		 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);
781 
782 	if (qp->scq)
783 		req.scq_cid = cpu_to_le32(qp->scq->id);
784 
785 	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
786 
787 	/* RQ */
788 	if (rq->max_wqe) {
789 		rq->hwq.max_elements = qp->rq.max_wqe;
790 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
791 					       &rq->hwq.max_elements,
792 					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
793 					       PAGE_SIZE, HWQ_TYPE_QUEUE);
794 		if (rc)
795 			goto fail_sq;
796 
797 		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
798 				  GFP_KERNEL);
799 		if (!rq->swq) {
800 			rc = -ENOMEM;
801 			goto fail_rq;
802 		}
803 		pbl = &rq->hwq.pbl[PBL_LVL_0];
804 		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
805 		req.rq_pg_size_rq_lvl =
806 			((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
807 			 CMDQ_CREATE_QP1_RQ_LVL_SFT) |
808 				(pbl->pg_size == ROCE_PG_SIZE_4K ?
809 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
810 				 pbl->pg_size == ROCE_PG_SIZE_8K ?
811 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
812 				 pbl->pg_size == ROCE_PG_SIZE_64K ?
813 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
814 				 pbl->pg_size == ROCE_PG_SIZE_2M ?
815 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
816 				 pbl->pg_size == ROCE_PG_SIZE_8M ?
817 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
818 				 pbl->pg_size == ROCE_PG_SIZE_1G ?
819 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
820 				 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
821 		if (qp->rcq)
822 			req.rcq_cid = cpu_to_le32(qp->rcq->id);
823 	}
824 
825 	/* Header buffer - allow hdr_buf to be passed in */
826 	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
827 	if (rc) {
828 		rc = -ENOMEM;
829 		goto fail;
830 	}
831 	req.qp_flags = cpu_to_le32(qp_flags);
832 	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
833 	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
834 
835 	req.sq_fwo_sq_sge =
836 		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
837 			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
838 	req.rq_fwo_rq_sge =
839 		cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
840 			    CMDQ_CREATE_QP1_RQ_SGE_SFT);
841 
842 	req.pd_id = cpu_to_le32(qp->pd->id);
843 
844 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
845 					  (void *)&resp, NULL, 0);
846 	if (rc)
847 		goto fail;
848 
849 	qp->id = le32_to_cpu(resp.xid);
850 	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
851 	rcfw->qp_tbl[qp->id].qp_id = qp->id;
852 	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
853 
854 	return 0;
855 
856 fail:
857 	bnxt_qplib_free_qp_hdr_buf(res, qp);
858 fail_rq:
859 	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
860 	kfree(rq->swq);
861 fail_sq:
862 	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
863 	kfree(sq->swq);
864 exit:
865 	return rc;
866 }
867 
868 int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
869 {
870 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
871 	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
872 	struct cmdq_create_qp req;
873 	struct creq_create_qp_resp resp;
874 	struct bnxt_qplib_pbl *pbl;
875 	struct sq_psn_search **psn_search_ptr;
876 	unsigned long psn_search, poff = 0;
877 	struct bnxt_qplib_q *sq = &qp->sq;
878 	struct bnxt_qplib_q *rq = &qp->rq;
879 	struct bnxt_qplib_hwq *xrrq;
880 	int i, rc, req_size, psn_sz;
881 	u16 cmd_flags = 0, max_ssge;
882 	u32 sw_prod, qp_flags = 0;
883 
884 	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
885 
886 	/* General */
887 	req.type = qp->type;
888 	req.dpi = cpu_to_le32(qp->dpi->dpi);
889 	req.qp_handle = cpu_to_le64(qp->qp_handle);
890 
891 	/* SQ */
892 	psn_sz = (qp->type == CMDQ_CREATE_QP_TYPE_RC) ?
893 		 sizeof(struct sq_psn_search) : 0;
894 	sq->hwq.max_elements = sq->max_wqe;
895 	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
896 				       sq->nmap, &sq->hwq.max_elements,
897 				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
898 				       psn_sz,
899 				       PAGE_SIZE, HWQ_TYPE_QUEUE);
900 	if (rc)
901 		goto exit;
902 
903 	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
904 	if (!sq->swq) {
905 		rc = -ENOMEM;
906 		goto fail_sq;
907 	}
908 	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
909 	if (psn_sz) {
910 		psn_search_ptr = (struct sq_psn_search **)
911 				  &hw_sq_send_ptr[get_sqe_pg
912 					(sq->hwq.max_elements)];
913 		psn_search = (unsigned long int)
914 			      &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
915 			      [get_sqe_idx(sq->hwq.max_elements)];
916 		if (psn_search & ~PAGE_MASK) {
917 			/* If the psn_search does not start on a page boundary,
918 			 * then calculate the offset
919 			 */
920 			poff = (psn_search & ~PAGE_MASK) /
921 				BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
922 		}
923 		for (i = 0; i < sq->hwq.max_elements; i++)
924 			sq->swq[i].psn_search =
925 				&psn_search_ptr[get_psne_pg(i + poff)]
926 					       [get_psne_idx(i + poff)];
927 	}
928 	pbl = &sq->hwq.pbl[PBL_LVL_0];
929 	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
930 	req.sq_pg_size_sq_lvl =
931 		((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
932 				 <<  CMDQ_CREATE_QP_SQ_LVL_SFT) |
933 		(pbl->pg_size == ROCE_PG_SIZE_4K ?
934 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
935 		 pbl->pg_size == ROCE_PG_SIZE_8K ?
936 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
937 		 pbl->pg_size == ROCE_PG_SIZE_64K ?
938 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
939 		 pbl->pg_size == ROCE_PG_SIZE_2M ?
940 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
941 		 pbl->pg_size == ROCE_PG_SIZE_8M ?
942 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
943 		 pbl->pg_size == ROCE_PG_SIZE_1G ?
944 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
945 		 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
946 
947 	/* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
948 	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
949 	for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
950 		hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
951 						[get_sqe_idx(sw_prod)];
952 		hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
953 	}
954 
955 	if (qp->scq)
956 		req.scq_cid = cpu_to_le32(qp->scq->id);
957 
958 	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
959 	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
960 	if (qp->sig_type)
961 		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
962 
963 	/* RQ */
964 	if (rq->max_wqe) {
965 		rq->hwq.max_elements = rq->max_wqe;
966 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
967 					       rq->nmap, &rq->hwq.max_elements,
968 					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
969 					       PAGE_SIZE, HWQ_TYPE_QUEUE);
970 		if (rc)
971 			goto fail_sq;
972 
973 		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
974 				  GFP_KERNEL);
975 		if (!rq->swq) {
976 			rc = -ENOMEM;
977 			goto fail_rq;
978 		}
979 		pbl = &rq->hwq.pbl[PBL_LVL_0];
980 		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
981 		req.rq_pg_size_rq_lvl =
982 			((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
983 			 CMDQ_CREATE_QP_RQ_LVL_SFT) |
984 				(pbl->pg_size == ROCE_PG_SIZE_4K ?
985 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
986 				 pbl->pg_size == ROCE_PG_SIZE_8K ?
987 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
988 				 pbl->pg_size == ROCE_PG_SIZE_64K ?
989 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
990 				 pbl->pg_size == ROCE_PG_SIZE_2M ?
991 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
992 				 pbl->pg_size == ROCE_PG_SIZE_8M ?
993 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
994 				 pbl->pg_size == ROCE_PG_SIZE_1G ?
995 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
996 				 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
997 	} else {
998 		/* SRQ */
999 		if (qp->srq) {
1000 			qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
1001 			req.srq_cid = cpu_to_le32(qp->srq->id);
1002 		}
1003 	}
1004 
1005 	if (qp->rcq)
1006 		req.rcq_cid = cpu_to_le32(qp->rcq->id);
1007 	req.qp_flags = cpu_to_le32(qp_flags);
1008 	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
1009 	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
1010 	qp->sq_hdr_buf = NULL;
1011 	qp->rq_hdr_buf = NULL;
1012 
1013 	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
1014 	if (rc)
1015 		goto fail_rq;
1016 
1017 	/* CTRL-22434: Irrespective of the requested SGE count on the SQ,
1018 	 * always create the QP with the max send SGEs possible if the
1019 	 * requested inline size is greater than 0.
1020 	 */
1021 	max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
1022 	req.sq_fwo_sq_sge = cpu_to_le16(
1023 				((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
1024 				 << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
1025 	req.rq_fwo_rq_sge = cpu_to_le16(
1026 				((rq->max_sge & CMDQ_CREATE_QP_RQ_SGE_MASK)
1027 				 << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
1028 	/* ORRQ and IRRQ */
1029 	if (psn_sz) {
1030 		xrrq = &qp->orrq;
1031 		xrrq->max_elements =
1032 			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1033 		req_size = xrrq->max_elements *
1034 			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1035 		req_size &= ~(PAGE_SIZE - 1);
1036 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
1037 					       &xrrq->max_elements,
1038 					       BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
1039 					       0, req_size, HWQ_TYPE_CTX);
1040 		if (rc)
1041 			goto fail_buf_free;
1042 		pbl = &xrrq->pbl[PBL_LVL_0];
1043 		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1044 
1045 		xrrq = &qp->irrq;
1046 		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
1047 						qp->max_dest_rd_atomic);
1048 		req_size = xrrq->max_elements *
1049 			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1050 		req_size &= ~(PAGE_SIZE - 1);
1051 
1052 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
1053 					       &xrrq->max_elements,
1054 					       BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
1055 					       0, req_size, HWQ_TYPE_CTX);
1056 		if (rc)
1057 			goto fail_orrq;
1058 
1059 		pbl = &xrrq->pbl[PBL_LVL_0];
1060 		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1061 	}
1062 	req.pd_id = cpu_to_le32(qp->pd->id);
1063 
1064 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1065 					  (void *)&resp, NULL, 0);
1066 	if (rc)
1067 		goto fail;
1068 
1069 	qp->id = le32_to_cpu(resp.xid);
1070 	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
1071 	INIT_LIST_HEAD(&qp->sq_flush);
1072 	INIT_LIST_HEAD(&qp->rq_flush);
1073 	rcfw->qp_tbl[qp->id].qp_id = qp->id;
1074 	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
1075 
1076 	return 0;
1077 
1078 fail:
1079 	if (qp->irrq.max_elements)
1080 		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1081 fail_orrq:
1082 	if (qp->orrq.max_elements)
1083 		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1084 fail_buf_free:
1085 	bnxt_qplib_free_qp_hdr_buf(res, qp);
1086 fail_rq:
1087 	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
1088 	kfree(rq->swq);
1089 fail_sq:
1090 	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
1091 	kfree(sq->swq);
1092 exit:
1093 	return rc;
1094 }
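
/*
 * ORRQ/IRRQ sizing sketch: the request size is rounded up to a whole
 * number of pages with the usual power-of-two mask trick. For example,
 * with xrrq->max_elements = 126 and a 16-byte ORRQE (assumed sizes, for
 * illustration only):
 *
 *	req_size = 126 * 16 + PAGE_SIZE - 1;	// 2016 + 4095 = 6111
 *	req_size &= ~(PAGE_SIZE - 1);		// truncates to 4096
 *
 * i.e. one 4K page. The same round-up is applied to the IRRQ before both
 * rings are handed to bnxt_qplib_alloc_init_hwq() as HWQ_TYPE_CTX queues.
 */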
1095 
1096 static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
1097 {
1098 	switch (qp->state) {
1099 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1100 		/* INIT->RTR, configure the path_mtu to the default
1101 		 * 2048 if not being requested
1102 		 */
1103 		if (!(qp->modify_flags &
1104 		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
1105 			qp->modify_flags |=
1106 				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1107 			qp->path_mtu =
1108 				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1109 		}
1110 		qp->modify_flags &=
1111 			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1112 		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
1113 		if (qp->max_dest_rd_atomic < 1)
1114 			qp->max_dest_rd_atomic = 1;
1115 		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
1116 		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
1117 		if (!(qp->modify_flags &
1118 		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
1119 			qp->modify_flags |=
1120 				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
1121 			qp->ah.sgid_index = 0;
1122 		}
1123 		break;
1124 	default:
1125 		break;
1126 	}
1127 }
1128 
1129 static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
1130 {
1131 	switch (qp->state) {
1132 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1133 		/* Bono FW requires the max_rd_atomic to be >= 1 */
1134 		if (qp->max_rd_atomic < 1)
1135 			qp->max_rd_atomic = 1;
1136 		/* Bono FW does not allow PKEY_INDEX,
1137 		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
1138 		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
1139 		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
1140 		 * modification
1141 		 */
1142 		qp->modify_flags &=
1143 			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
1144 			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1145 			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1146 			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1147 			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1148 			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1149 			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1150 			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
1151 			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
1152 			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
1153 			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
1154 			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
1155 		break;
1156 	default:
1157 		break;
1158 	}
1159 }
1160 
1161 static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
1162 {
1163 	switch (qp->cur_qp_state) {
1164 	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1165 		break;
1166 	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1167 		__modify_flags_from_init_state(qp);
1168 		break;
1169 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1170 		__modify_flags_from_rtr_state(qp);
1171 		break;
1172 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1173 		break;
1174 	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1175 		break;
1176 	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1177 		break;
1178 	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1179 		break;
1180 	default:
1181 		break;
1182 	}
1183 }
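
/*
 * Filtering sketch: __filter_modify_flags() keys off the *current* QP
 * state to patch the requested attribute mask before it reaches the FW.
 * For example, an INIT->RTR transition that did not specify a path MTU:
 *
 *	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_INIT;
 *	qp->state = CMDQ_MODIFY_QP_NEW_STATE_RTR;
 *	__filter_modify_flags(qp);
 *	// qp->path_mtu is now CMDQ_MODIFY_QP_PATH_MTU_MTU_2048 and the
 *	// PATH_MTU bit has been set in qp->modify_flags.
 */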
1184 
1185 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1186 {
1187 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1188 	struct cmdq_modify_qp req;
1189 	struct creq_modify_qp_resp resp;
1190 	u16 cmd_flags = 0, pkey;
1191 	u32 temp32[4];
1192 	u32 bmask;
1193 	int rc;
1194 
1195 	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
1196 
1197 	/* Filter out the qp_attr_mask based on the state->new transition */
1198 	__filter_modify_flags(qp);
1199 	bmask = qp->modify_flags;
1200 	req.modify_mask = cpu_to_le32(qp->modify_flags);
1201 	req.qp_cid = cpu_to_le32(qp->id);
1202 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
1203 		req.network_type_en_sqd_async_notify_new_state =
1204 				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
1205 				(qp->en_sqd_async_notify ?
1206 					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
1207 	}
1208 	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
1209 
1210 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
1211 		req.access = qp->access;
1212 
1213 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
1214 		if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
1215 					 qp->pkey_index, &pkey))
1216 			req.pkey = cpu_to_le16(pkey);
1217 	}
1218 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
1219 		req.qkey = cpu_to_le32(qp->qkey);
1220 
1221 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
1222 		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
1223 		req.dgid[0] = cpu_to_le32(temp32[0]);
1224 		req.dgid[1] = cpu_to_le32(temp32[1]);
1225 		req.dgid[2] = cpu_to_le32(temp32[2]);
1226 		req.dgid[3] = cpu_to_le32(temp32[3]);
1227 	}
1228 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
1229 		req.flow_label = cpu_to_le32(qp->ah.flow_label);
1230 
1231 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
1232 		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
1233 					     [qp->ah.sgid_index]);
1234 
1235 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
1236 		req.hop_limit = qp->ah.hop_limit;
1237 
1238 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
1239 		req.traffic_class = qp->ah.traffic_class;
1240 
1241 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
1242 		memcpy(req.dest_mac, qp->ah.dmac, 6);
1243 
1244 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
1245 		req.path_mtu = qp->path_mtu;
1246 
1247 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
1248 		req.timeout = qp->timeout;
1249 
1250 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
1251 		req.retry_cnt = qp->retry_cnt;
1252 
1253 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
1254 		req.rnr_retry = qp->rnr_retry;
1255 
1256 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
1257 		req.min_rnr_timer = qp->min_rnr_timer;
1258 
1259 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
1260 		req.rq_psn = cpu_to_le32(qp->rq.psn);
1261 
1262 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
1263 		req.sq_psn = cpu_to_le32(qp->sq.psn);
1264 
1265 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
1266 		req.max_rd_atomic =
1267 			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1268 
1269 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
1270 		req.max_dest_rd_atomic =
1271 			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1272 
1273 	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1274 	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1275 	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1276 	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1277 	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1278 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1279 		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1280 
1281 	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1282 
1283 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1284 					  (void *)&resp, NULL, 0);
1285 	if (rc)
1286 		return rc;
1287 	qp->cur_qp_state = qp->state;
1288 	return 0;
1289 }
1290 
1291 int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1292 {
1293 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1294 	struct cmdq_query_qp req;
1295 	struct creq_query_qp_resp resp;
1296 	struct bnxt_qplib_rcfw_sbuf *sbuf;
1297 	struct creq_query_qp_resp_sb *sb;
1298 	u16 cmd_flags = 0;
1299 	u32 temp32[4];
1300 	int i, rc = 0;
1301 
1302 	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
1303 
1304 	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
1305 	if (!sbuf)
1306 		return -ENOMEM;
1307 	sb = sbuf->sb;
1308 
1309 	req.qp_cid = cpu_to_le32(qp->id);
1310 	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
1311 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
1312 					  (void *)sbuf, 0);
1313 	if (rc)
1314 		goto bail;
1315 	/* Extract the context from the side buffer */
1316 	qp->state = sb->en_sqd_async_notify_state &
1317 			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1318 	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1319 				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
1320 				  true : false;
1321 	qp->access = sb->access;
1322 	qp->pkey_index = le16_to_cpu(sb->pkey);
1323 	qp->qkey = le32_to_cpu(sb->qkey);
1324 
1325 	temp32[0] = le32_to_cpu(sb->dgid[0]);
1326 	temp32[1] = le32_to_cpu(sb->dgid[1]);
1327 	temp32[2] = le32_to_cpu(sb->dgid[2]);
1328 	temp32[3] = le32_to_cpu(sb->dgid[3]);
1329 	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1330 
1331 	qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1332 
1333 	qp->ah.sgid_index = 0;
1334 	for (i = 0; i < res->sgid_tbl.max; i++) {
1335 		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1336 			qp->ah.sgid_index = i;
1337 			break;
1338 		}
1339 	}
1340 	if (i == res->sgid_tbl.max)
1341 		dev_warn(&res->pdev->dev, "QPLIB: SGID not found");
1342 
1343 	qp->ah.hop_limit = sb->hop_limit;
1344 	qp->ah.traffic_class = sb->traffic_class;
1345 	memcpy(qp->ah.dmac, sb->dest_mac, 6);
1346 	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1347 				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
1348 				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1349 	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1350 				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
1351 				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
1352 	qp->timeout = sb->timeout;
1353 	qp->retry_cnt = sb->retry_cnt;
1354 	qp->rnr_retry = sb->rnr_retry;
1355 	qp->min_rnr_timer = sb->min_rnr_timer;
1356 	qp->rq.psn = le32_to_cpu(sb->rq_psn);
1357 	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1358 	qp->sq.psn = le32_to_cpu(sb->sq_psn);
1359 	qp->max_dest_rd_atomic =
1360 			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1361 	qp->sq.max_wqe = qp->sq.hwq.max_elements;
1362 	qp->rq.max_wqe = qp->rq.hwq.max_elements;
1363 	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1364 	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1365 	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1366 	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1367 	memcpy(qp->smac, sb->src_mac, 6);
1368 	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1369 bail:
1370 	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
1371 	return rc;
1372 }
1373 
1374 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1375 {
1376 	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1377 	struct cq_base *hw_cqe, **hw_cqe_ptr;
1378 	int i;
1379 
1380 	for (i = 0; i < cq_hwq->max_elements; i++) {
1381 		hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
1382 		hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
1383 		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
1384 			continue;
1385 		/*
1386 		 * The valid test of the entry must be done first before
1387 		 * reading any further.
1388 		 */
1389 		dma_rmb();
1390 		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1391 		case CQ_BASE_CQE_TYPE_REQ:
1392 		case CQ_BASE_CQE_TYPE_TERMINAL:
1393 		{
1394 			struct cq_req *cqe = (struct cq_req *)hw_cqe;
1395 
1396 			if (qp == le64_to_cpu(cqe->qp_handle))
1397 				cqe->qp_handle = 0;
1398 			break;
1399 		}
1400 		case CQ_BASE_CQE_TYPE_RES_RC:
1401 		case CQ_BASE_CQE_TYPE_RES_UD:
1402 		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1403 		{
1404 			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1405 
1406 			if (qp == le64_to_cpu(cqe->qp_handle))
1407 				cqe->qp_handle = 0;
1408 			break;
1409 		}
1410 		default:
1411 			break;
1412 		}
1413 	}
1414 }
1415 
1416 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1417 			  struct bnxt_qplib_qp *qp)
1418 {
1419 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1420 	struct cmdq_destroy_qp req;
1421 	struct creq_destroy_qp_resp resp;
1422 	unsigned long flags;
1423 	u16 cmd_flags = 0;
1424 	int rc;
1425 
1426 	rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1427 	rcfw->qp_tbl[qp->id].qp_handle = NULL;
1428 
1429 	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
1430 
1431 	req.qp_cid = cpu_to_le32(qp->id);
1432 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1433 					  (void *)&resp, NULL, 0);
1434 	if (rc) {
1435 		rcfw->qp_tbl[qp->id].qp_id = qp->id;
1436 		rcfw->qp_tbl[qp->id].qp_handle = qp;
1437 		return rc;
1438 	}
1439 
1440 	/* Must walk the associated CQs to nullify the QP ptr */
1441 	spin_lock_irqsave(&qp->scq->hwq.lock, flags);
1442 
1443 	__clean_cq(qp->scq, (u64)(unsigned long)qp);
1444 
1445 	if (qp->rcq && qp->rcq != qp->scq) {
1446 		spin_lock(&qp->rcq->hwq.lock);
1447 		__clean_cq(qp->rcq, (u64)(unsigned long)qp);
1448 		spin_unlock(&qp->rcq->hwq.lock);
1449 	}
1450 
1451 	spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);
1452 
1453 	bnxt_qplib_free_qp_hdr_buf(res, qp);
1454 	bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
1455 	kfree(qp->sq.swq);
1456 
1457 	bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
1458 	kfree(qp->rq.swq);
1459 
1460 	if (qp->irrq.max_elements)
1461 		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1462 	if (qp->orrq.max_elements)
1463 		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1464 
1465 	return 0;
1466 }
1467 
1468 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1469 				struct bnxt_qplib_sge *sge)
1470 {
1471 	struct bnxt_qplib_q *sq = &qp->sq;
1472 	u32 sw_prod;
1473 
1474 	memset(sge, 0, sizeof(*sge));
1475 
1476 	if (qp->sq_hdr_buf) {
1477 		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1478 		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1479 					 sw_prod * qp->sq_hdr_buf_size);
1480 		sge->lkey = 0xFFFFFFFF;
1481 		sge->size = qp->sq_hdr_buf_size;
1482 		return qp->sq_hdr_buf + sw_prod * sge->size;
1483 	}
1484 	return NULL;
1485 }
1486 
1487 u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1488 {
1489 	struct bnxt_qplib_q *rq = &qp->rq;
1490 
1491 	return HWQ_CMP(rq->hwq.prod, &rq->hwq);
1492 }
1493 
1494 dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1495 {
1496 	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1497 }
1498 
1499 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1500 				struct bnxt_qplib_sge *sge)
1501 {
1502 	struct bnxt_qplib_q *rq = &qp->rq;
1503 	u32 sw_prod;
1504 
1505 	memset(sge, 0, sizeof(*sge));
1506 
1507 	if (qp->rq_hdr_buf) {
1508 		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1509 		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1510 					 sw_prod * qp->rq_hdr_buf_size);
1511 		sge->lkey = 0xFFFFFFFF;
1512 		sge->size = qp->rq_hdr_buf_size;
1513 		return qp->rq_hdr_buf + sw_prod * sge->size;
1514 	}
1515 	return NULL;
1516 }
1517 
1518 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1519 {
1520 	struct bnxt_qplib_q *sq = &qp->sq;
1521 	struct dbr_dbr db_msg = { 0 };
1522 	u32 sw_prod;
1523 
1524 	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1525 
1526 	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
1527 				   DBR_DBR_INDEX_MASK);
1528 	db_msg.type_xid =
1529 		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1530 			    DBR_DBR_TYPE_SQ);
1531 	/* Flush all the WQE writes to HW */
1532 	wmb();
1533 	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1534 }
1535 
1536 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1537 			 struct bnxt_qplib_swqe *wqe)
1538 {
1539 	struct bnxt_qplib_q *sq = &qp->sq;
1540 	struct bnxt_qplib_swq *swq;
1541 	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
1542 	struct sq_sge *hw_sge;
1543 	struct bnxt_qplib_nq_work *nq_work = NULL;
1544 	bool sch_handler = false;
1545 	u32 sw_prod;
1546 	u8 wqe_size16;
1547 	int i, rc = 0, data_len = 0, pkt_num = 0;
1548 	__le32 temp32;
1549 
1550 	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
1551 		if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1552 			sch_handler = true;
1553 			dev_dbg(&sq->hwq.pdev->dev,
1554 				"%s Error QP. Scheduling for poll_cq\n",
1555 				__func__);
1556 			goto queue_err;
1557 		}
1558 	}
1559 
1560 	if (bnxt_qplib_queue_full(sq)) {
1561 		dev_err(&sq->hwq.pdev->dev,
1562 			"QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
1563 			sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
1564 			sq->q_full_delta);
1565 		rc = -ENOMEM;
1566 		goto done;
1567 	}
1568 	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1569 	swq = &sq->swq[sw_prod];
1570 	swq->wr_id = wqe->wr_id;
1571 	swq->type = wqe->type;
1572 	swq->flags = wqe->flags;
1573 	if (qp->sig_type)
1574 		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1575 	swq->start_psn = sq->psn & BTH_PSN_MASK;
1576 
1577 	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
1578 	hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
1579 					[get_sqe_idx(sw_prod)];
1580 
1581 	memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
1582 
1583 	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1584 		/* Copy the inline data */
1585 		if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
1586 			dev_warn(&sq->hwq.pdev->dev,
1587 				 "QPLIB: Inline data length > 96 detected");
1588 			data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
1589 		} else {
1590 			data_len = wqe->inline_len;
1591 		}
1592 		memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
1593 		wqe_size16 = (data_len + 15) >> 4;
1594 	} else {
1595 		for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
1596 		     i < wqe->num_sge; i++, hw_sge++) {
1597 			hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1598 			hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1599 			hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1600 			data_len += wqe->sg_list[i].size;
1601 		}
1602 		/* Each SGE entry = 1 WQE size16 */
1603 		wqe_size16 = wqe->num_sge;
1604 		/* HW requires the wqe size to have room for at least one SGE
1605 		 * even if none was supplied by the ULP
1606 		 */
1607 		if (!wqe->num_sge)
1608 			wqe_size16++;
1609 	}
1610 
1611 	/* Specifics */
1612 	switch (wqe->type) {
1613 	case BNXT_QPLIB_SWQE_TYPE_SEND:
1614 		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1615 			/* Assemble info for Raw Ethertype QPs */
1616 			struct sq_send_raweth_qp1 *sqe =
1617 				(struct sq_send_raweth_qp1 *)hw_sq_send_hdr;
1618 
1619 			sqe->wqe_type = wqe->type;
1620 			sqe->flags = wqe->flags;
1621 			sqe->wqe_size = wqe_size16 +
1622 				((offsetof(typeof(*sqe), data) + 15) >> 4);
1623 			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1624 			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1625 			sqe->length = cpu_to_le32(data_len);
1626 			sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1627 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1628 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1629 
1630 			break;
1631 		}
1632 		/* fall through */
1633 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1634 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1635 	{
1636 		struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;
1637 
1638 		sqe->wqe_type = wqe->type;
1639 		sqe->flags = wqe->flags;
1640 		sqe->wqe_size = wqe_size16 +
1641 				((offsetof(typeof(*sqe), data) + 15) >> 4);
1642 		sqe->inv_key_or_imm_data = cpu_to_le32(
1643 						wqe->send.inv_key);
1644 		if (qp->type == CMDQ_CREATE_QP_TYPE_UD) {
1645 			sqe->q_key = cpu_to_le32(wqe->send.q_key);
1646 			sqe->dst_qp = cpu_to_le32(
1647 					wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
1648 			sqe->length = cpu_to_le32(data_len);
1649 			sqe->avid = cpu_to_le32(wqe->send.avid &
1650 						SQ_SEND_AVID_MASK);
1651 			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1652 		} else {
1653 			sqe->length = cpu_to_le32(data_len);
1654 			sqe->dst_qp = 0;
1655 			sqe->avid = 0;
1656 			if (qp->mtu)
1657 				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1658 			if (!pkt_num)
1659 				pkt_num = 1;
1660 			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1661 		}
1662 		break;
1663 	}
1664 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1665 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1666 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1667 	{
1668 		struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;
1669 
1670 		sqe->wqe_type = wqe->type;
1671 		sqe->flags = wqe->flags;
1672 		sqe->wqe_size = wqe_size16 +
1673 				((offsetof(typeof(*sqe), data) + 15) >> 4);
1674 		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1675 		sqe->length = cpu_to_le32((u32)data_len);
1676 		sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1677 		sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1678 		if (qp->mtu)
1679 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1680 		if (!pkt_num)
1681 			pkt_num = 1;
1682 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1683 		break;
1684 	}
1685 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1686 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1687 	{
1688 		struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;
1689 
1690 		sqe->wqe_type = wqe->type;
1691 		sqe->flags = wqe->flags;
1692 		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1693 		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1694 		sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1695 		sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1696 		if (qp->mtu)
1697 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1698 		if (!pkt_num)
1699 			pkt_num = 1;
1700 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1701 		break;
1702 	}
1703 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1704 	{
1705 		struct sq_localinvalidate *sqe =
1706 				(struct sq_localinvalidate *)hw_sq_send_hdr;
1707 
1708 		sqe->wqe_type = wqe->type;
1709 		sqe->flags = wqe->flags;
1710 		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1711 
1712 		break;
1713 	}
1714 	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1715 	{
1716 		struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;
1717 
1718 		sqe->wqe_type = wqe->type;
1719 		sqe->flags = wqe->flags;
1720 		sqe->access_cntl = wqe->frmr.access_cntl |
1721 				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1722 		sqe->zero_based_page_size_log =
1723 			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1724 			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1725 			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1726 		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1727 		temp32 = cpu_to_le32(wqe->frmr.length);
1728 		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1729 		sqe->numlevels_pbl_page_size_log =
1730 			((wqe->frmr.pbl_pg_sz_log <<
1731 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1732 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1733 			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1734 					SQ_FR_PMR_NUMLEVELS_MASK);
1735 
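		/* Stamp each page address into the PBL with the valid bit set */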
1736 		for (i = 0; i < wqe->frmr.page_list_len; i++)
1737 			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1738 						wqe->frmr.page_list[i] |
1739 						PTU_PTE_VALID);
1740 		sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1741 		sqe->va = cpu_to_le64(wqe->frmr.va);
1742 
1743 		break;
1744 	}
1745 	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1746 	{
1747 		struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;
1748 
1749 		sqe->wqe_type = wqe->type;
1750 		sqe->flags = wqe->flags;
1751 		sqe->access_cntl = wqe->bind.access_cntl;
1752 		sqe->mw_type_zero_based = wqe->bind.mw_type |
1753 			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1754 		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1755 		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1756 		sqe->va = cpu_to_le64(wqe->bind.va);
1757 		temp32 = cpu_to_le32(wqe->bind.length);
1758 		memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
1759 		break;
1760 	}
1761 	default:
1762 		/* Bad wqe, return error */
1763 		rc = -EINVAL;
1764 		goto done;
1765 	}
1766 	swq->next_psn = sq->psn & BTH_PSN_MASK;
1767 	if (swq->psn_search) {
1768 		swq->psn_search->opcode_start_psn = cpu_to_le32(
1769 			((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1770 			 SQ_PSN_SEARCH_START_PSN_MASK) |
1771 			((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1772 			 SQ_PSN_SEARCH_OPCODE_MASK));
1773 		swq->psn_search->flags_next_psn = cpu_to_le32(
1774 			((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1775 			 SQ_PSN_SEARCH_NEXT_PSN_MASK));
1776 	}
1777 queue_err:
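	/*
	 * Error-state QPs never hand the WQE to HW: only the ULP context is
	 * recorded here so that poll_cq can later fabricate a flush
	 * completion, and a work item is queued (below, at "done") to kick
	 * that CQ processing.
	 */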
1778 	if (sch_handler) {
1779 		/* Store the ULP info in the software structures */
1780 		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1781 		swq = &sq->swq[sw_prod];
1782 		swq->wr_id = wqe->wr_id;
1783 		swq->type = wqe->type;
1784 		swq->flags = wqe->flags;
1785 		if (qp->sig_type)
1786 			swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1787 		swq->start_psn = sq->psn & BTH_PSN_MASK;
1788 	}
1789 	sq->hwq.prod++;
1790 	qp->wqe_cnt++;
1791 
1792 done:
1793 	if (sch_handler) {
1794 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1795 		if (nq_work) {
1796 			nq_work->cq = qp->scq;
1797 			nq_work->nq = qp->scq->nq;
1798 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1799 			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
1800 		} else {
1801 			dev_err(&sq->hwq.pdev->dev,
1802 				"QPLIB: FP: Failed to allocate SQ nq_work!");
1803 			rc = -ENOMEM;
1804 		}
1805 	}
1806 	return rc;
1807 }
1808 
1809 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
1810 {
1811 	struct bnxt_qplib_q *rq = &qp->rq;
1812 	struct dbr_dbr db_msg = { 0 };
1813 	u32 sw_prod;
1814 
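	/*
	 * Build the 64-bit doorbell record consisting of the ring producer
	 * index and the QP id plus doorbell type (RQ), then write it to the
	 * doorbell page mapped for this DPI.
	 */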
1815 	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1816 	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
1817 				   DBR_DBR_INDEX_MASK);
1818 	db_msg.type_xid =
1819 		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1820 			    DBR_DBR_TYPE_RQ);
1821 
	/* Flush the writes to the HW Rx WQE before ringing the Rx doorbell */
1823 	wmb();
1824 	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1825 }
1826 
1827 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1828 			 struct bnxt_qplib_swqe *wqe)
1829 {
1830 	struct bnxt_qplib_q *rq = &qp->rq;
1831 	struct rq_wqe *rqe, **rqe_ptr;
1832 	struct sq_sge *hw_sge;
1833 	struct bnxt_qplib_nq_work *nq_work = NULL;
1834 	bool sch_handler = false;
1835 	u32 sw_prod;
1836 	int i, rc = 0;
1837 
1838 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1839 		sch_handler = true;
1840 		dev_dbg(&rq->hwq.pdev->dev,
1841 			"%s Error QP. Scheduling for poll_cq\n",
1842 			__func__);
1843 		goto queue_err;
1844 	}
1845 	if (bnxt_qplib_queue_full(rq)) {
1846 		dev_err(&rq->hwq.pdev->dev,
1847 			"QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
1848 		rc = -EINVAL;
1849 		goto done;
1850 	}
1851 	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1852 	rq->swq[sw_prod].wr_id = wqe->wr_id;
1853 
1854 	rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
1855 	rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
1856 
1857 	memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1858 
	/* Fill the HW SGE list from the wqe's sg_list */
1860 	for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
1861 	     i < wqe->num_sge; i++, hw_sge++) {
1862 		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1863 		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1864 		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1865 	}
1866 	rqe->wqe_type = wqe->type;
1867 	rqe->flags = wqe->flags;
1868 	rqe->wqe_size = wqe->num_sge +
1869 			((offsetof(typeof(*rqe), data) + 15) >> 4);
	/* HW requires the wqe size to have room for at least one SGE even if
	 * none was supplied by the ULP
	 */
1873 	if (!wqe->num_sge)
1874 		rqe->wqe_size++;
1875 
	/* Supply only the rqe index; the real wr_id comes from the swq */
1877 	rqe->wr_id[0] = cpu_to_le32(sw_prod);
1878 
1879 queue_err:
1880 	if (sch_handler) {
1881 		/* Store the ULP info in the software structures */
1882 		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1883 		rq->swq[sw_prod].wr_id = wqe->wr_id;
1884 	}
1885 
1886 	rq->hwq.prod++;
1887 	if (sch_handler) {
1888 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1889 		if (nq_work) {
1890 			nq_work->cq = qp->rcq;
1891 			nq_work->nq = qp->rcq->nq;
1892 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1893 			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
1894 		} else {
1895 			dev_err(&rq->hwq.pdev->dev,
1896 				"QPLIB: FP: Failed to allocate RQ nq_work!");
1897 			rc = -ENOMEM;
1898 		}
1899 	}
1900 done:
1901 	return rc;
1902 }
1903 
1904 /* CQ */
1905 
1906 /* Spinlock must be held */
1907 static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
1908 {
1909 	struct dbr_dbr db_msg = { 0 };
1910 
1911 	db_msg.type_xid =
1912 		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1913 			    DBR_DBR_TYPE_CQ_ARMENA);
1914 	/* Flush memory writes before enabling the CQ */
1915 	wmb();
1916 	__iowrite64_copy(cq->dbr_base, &db_msg, sizeof(db_msg) / sizeof(u64));
1917 }
1918 
1919 static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
1920 {
1921 	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1922 	struct dbr_dbr db_msg = { 0 };
1923 	u32 sw_cons;
1924 
1925 	/* Ring DB */
1926 	sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
1927 	db_msg.index = cpu_to_le32((sw_cons << DBR_DBR_INDEX_SFT) &
1928 				    DBR_DBR_INDEX_MASK);
1929 	db_msg.type_xid =
1930 		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1931 			    arm_type);
1932 	/* flush memory writes before arming the CQ */
1933 	wmb();
1934 	__iowrite64_copy(cq->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1935 }
1936 
1937 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1938 {
1939 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1940 	struct cmdq_create_cq req;
1941 	struct creq_create_cq_resp resp;
1942 	struct bnxt_qplib_pbl *pbl;
1943 	u16 cmd_flags = 0;
1944 	int rc;
1945 
1946 	cq->hwq.max_elements = cq->max_wqe;
1947 	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
1948 				       cq->nmap, &cq->hwq.max_elements,
1949 				       BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
1950 				       PAGE_SIZE, HWQ_TYPE_QUEUE);
1951 	if (rc)
1952 		goto exit;
1953 
1954 	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
1955 
	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: FP: CREATE_CQ failed due to NULL DPI");
		rc = -EINVAL;
		goto fail;
	}
1961 	req.dpi = cpu_to_le32(cq->dpi->dpi);
1962 	req.cq_handle = cpu_to_le64(cq->cq_handle);
1963 
1964 	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
1965 	pbl = &cq->hwq.pbl[PBL_LVL_0];
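	/* Encode the PBL indirection level and the level-0 page size;
	 * unrecognized page sizes fall back to the 4K encoding
	 */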
1966 	req.pg_size_lvl = cpu_to_le32(
1967 	    ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
1968 						CMDQ_CREATE_CQ_LVL_SFT) |
1969 	    (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
1970 	     pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
1971 	     pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
1972 	     pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
1973 	     pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
1974 	     pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
1975 	     CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
1976 
1977 	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1978 
1979 	req.cq_fco_cnq_id = cpu_to_le32(
1980 			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
1981 			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
1982 
1983 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1984 					  (void *)&resp, NULL, 0);
1985 	if (rc)
1986 		goto fail;
1987 
1988 	cq->id = le32_to_cpu(resp.xid);
1989 	cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
1990 	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
1991 	init_waitqueue_head(&cq->waitq);
1992 	INIT_LIST_HEAD(&cq->sqf_head);
1993 	INIT_LIST_HEAD(&cq->rqf_head);
1994 	spin_lock_init(&cq->compl_lock);
1995 
1996 	bnxt_qplib_arm_cq_enable(cq);
1997 	return 0;
1998 
1999 fail:
2000 	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
2001 exit:
2002 	return rc;
2003 }
2004 
2005 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2006 {
2007 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2008 	struct cmdq_destroy_cq req;
2009 	struct creq_destroy_cq_resp resp;
2010 	u16 cmd_flags = 0;
2011 	int rc;
2012 
2013 	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
2014 
2015 	req.cq_cid = cpu_to_le32(cq->id);
2016 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
2017 					  (void *)&resp, NULL, 0);
2018 	if (rc)
2019 		return rc;
2020 	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
2021 	return 0;
2022 }
2023 
2024 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2025 		      struct bnxt_qplib_cqe **pcqe, int *budget)
2026 {
2027 	u32 sw_prod, sw_cons;
2028 	struct bnxt_qplib_cqe *cqe;
2029 	int rc = 0;
2030 
2031 	/* Now complete all outstanding SQEs with FLUSHED_ERR */
2032 	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
2033 	cqe = *pcqe;
2034 	while (*budget) {
2035 		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_cons == sw_prod)
			break;
2039 		/* Skip the FENCE WQE completions */
2040 		if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
2041 			bnxt_qplib_cancel_phantom_processing(qp);
2042 			goto skip_compl;
2043 		}
2044 		memset(cqe, 0, sizeof(*cqe));
2045 		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2046 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2047 		cqe->qp_handle = (u64)(unsigned long)qp;
2048 		cqe->wr_id = sq->swq[sw_cons].wr_id;
2049 		cqe->src_qp = qp->id;
2050 		cqe->type = sq->swq[sw_cons].type;
2051 		cqe++;
2052 		(*budget)--;
2053 skip_compl:
2054 		sq->hwq.cons++;
2055 	}
2056 	*pcqe = cqe;
2057 	if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
2058 		/* Out of budget */
2059 		rc = -EAGAIN;
2060 
2061 	return rc;
2062 }
2063 
2064 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2065 		      struct bnxt_qplib_cqe **pcqe, int *budget)
2066 {
2067 	struct bnxt_qplib_cqe *cqe;
2068 	u32 sw_prod, sw_cons;
2069 	int rc = 0;
2070 	int opcode = 0;
2071 
2072 	switch (qp->type) {
2073 	case CMDQ_CREATE_QP1_TYPE_GSI:
2074 		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2075 		break;
2076 	case CMDQ_CREATE_QP_TYPE_RC:
2077 		opcode = CQ_BASE_CQE_TYPE_RES_RC;
2078 		break;
2079 	case CMDQ_CREATE_QP_TYPE_UD:
2080 		opcode = CQ_BASE_CQE_TYPE_RES_UD;
2081 		break;
2082 	}
2083 
2084 	/* Flush the rest of the RQ */
2085 	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
2086 	cqe = *pcqe;
2087 	while (*budget) {
2088 		sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
2089 		if (sw_cons == sw_prod)
2090 			break;
2091 		memset(cqe, 0, sizeof(*cqe));
2092 		cqe->status =
2093 		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2094 		cqe->opcode = opcode;
2095 		cqe->qp_handle = (unsigned long)qp;
2096 		cqe->wr_id = rq->swq[sw_cons].wr_id;
2097 		cqe++;
2098 		(*budget)--;
2099 		rq->hwq.cons++;
2100 	}
2101 	*pcqe = cqe;
2102 	if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
2103 		/* Out of budget */
2104 		rc = -EAGAIN;
2105 
2106 	return rc;
2107 }
2108 
2109 void bnxt_qplib_mark_qp_error(void *qp_handle)
2110 {
2111 	struct bnxt_qplib_qp *qp = qp_handle;
2112 
2113 	if (!qp)
2114 		return;
2115 
2116 	/* Must block new posting of SQ and RQ */
2117 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2118 	bnxt_qplib_cancel_phantom_processing(qp);
2119 
2120 	/* Add qp to flush list of the CQ */
2121 	__bnxt_qplib_add_flush_qp(qp);
2122 }
2123 
/* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive)
 *       CQEs are tracked from sw_cq_cons to max_elements but are valid only
 *       when VALID=1
 */
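/* WA 9060: the CQE for a fence WQE (the "phantom") can show up later than
 * expected. If this SWQE's psn_search entry carries the driver's private
 * marker (bit 31 of flags_next_psn), completion is deferred (-EAGAIN) until
 * the phantom CQE arrives; the SQ is then drained one CQE at a time
 * ("single" mode).
 */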
2127 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2128 		     u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
2129 {
2130 	struct bnxt_qplib_q *sq = &qp->sq;
2131 	struct bnxt_qplib_swq *swq;
2132 	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
2133 	struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
2134 	struct cq_req *peek_req_hwcqe;
2135 	struct bnxt_qplib_qp *peek_qp;
2136 	struct bnxt_qplib_q *peek_sq;
2137 	int i, rc = 0;
2138 
2139 	/* Normal mode */
2140 	/* Check for the psn_search marking before completing */
2141 	swq = &sq->swq[sw_sq_cons];
2142 	if (swq->psn_search &&
2143 	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2144 		/* Unmark */
2145 		swq->psn_search->flags_next_psn = cpu_to_le32
2146 			(le32_to_cpu(swq->psn_search->flags_next_psn)
2147 				     & ~0x80000000);
2148 		dev_dbg(&cq->hwq.pdev->dev,
2149 			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2150 			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
2151 		sq->condition = true;
2152 		sq->send_phantom = true;
2153 
2154 		/* TODO: Only ARM if the previous SQE is ARMALL */
2155 		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
2156 
2157 		rc = -EAGAIN;
2158 		goto out;
2159 	}
2160 	if (sq->condition) {
2161 		/* Peek at the completions */
2162 		peek_raw_cq_cons = cq->hwq.cons;
2163 		peek_sw_cq_cons = cq_cons;
2164 		i = cq->hwq.max_elements;
2165 		while (i--) {
2166 			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
2167 			peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2168 			peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
2169 						     [CQE_IDX(peek_sw_cq_cons)];
2170 			/* If the next hwcqe is VALID */
2171 			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
2172 					  cq->hwq.max_elements)) {
				/*
				 * The valid test of the entry must be done
				 * first before reading any further.
				 */
2177 				dma_rmb();
2178 				/* If the next hwcqe is a REQ */
2179 				if ((peek_hwcqe->cqe_type_toggle &
2180 				    CQ_BASE_CQE_TYPE_MASK) ==
2181 				    CQ_BASE_CQE_TYPE_REQ) {
2182 					peek_req_hwcqe = (struct cq_req *)
2183 							 peek_hwcqe;
2184 					peek_qp = (struct bnxt_qplib_qp *)
2185 						((unsigned long)
2186 						 le64_to_cpu
2187 						 (peek_req_hwcqe->qp_handle));
2188 					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
						peek_req_hwcqe->sq_cons_idx) - 1,
						&sq->hwq);
2192 					/* If the hwcqe's sq's wr_id matches */
2193 					if (peek_sq == sq &&
2194 					    sq->swq[peek_sq_cons_idx].wr_id ==
2195 					    BNXT_QPLIB_FENCE_WRID) {
2196 						/*
2197 						 *  Unbreak only if the phantom
2198 						 *  comes back
2199 						 */
2200 						dev_dbg(&cq->hwq.pdev->dev,
2201 							"FP:Got Phantom CQE");
2202 						sq->condition = false;
2203 						sq->single = true;
2204 						rc = 0;
2205 						goto out;
2206 					}
2207 				}
2208 				/* Valid but not the phantom, so keep looping */
2209 			} else {
2210 				/* Not valid yet, just exit and wait */
2211 				rc = -EINVAL;
2212 				goto out;
2213 			}
2214 			peek_sw_cq_cons++;
2215 			peek_raw_cq_cons++;
2216 		}
2217 		dev_err(&cq->hwq.pdev->dev,
2218 			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
2219 			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
2220 		rc = -EINVAL;
2221 	}
2222 out:
2223 	return rc;
2224 }
2225 
2226 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2227 				     struct cq_req *hwcqe,
2228 				     struct bnxt_qplib_cqe **pcqe, int *budget,
2229 				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2230 {
2231 	struct bnxt_qplib_qp *qp;
2232 	struct bnxt_qplib_q *sq;
2233 	struct bnxt_qplib_cqe *cqe;
2234 	u32 sw_sq_cons, cqe_sq_cons;
2235 	struct bnxt_qplib_swq *swq;
2236 	int rc = 0;
2237 
2238 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2239 				      le64_to_cpu(hwcqe->qp_handle));
2240 	if (!qp) {
2241 		dev_err(&cq->hwq.pdev->dev,
2242 			"QPLIB: FP: Process Req qp is NULL");
2243 		return -EINVAL;
2244 	}
2245 	sq = &qp->sq;
2246 
2247 	cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
2248 	if (cqe_sq_cons > sq->hwq.max_elements) {
2249 		dev_err(&cq->hwq.pdev->dev,
2250 			"QPLIB: FP: CQ Process req reported ");
2251 		dev_err(&cq->hwq.pdev->dev,
2252 			"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
2253 			cqe_sq_cons, sq->hwq.max_elements);
2254 		return -EINVAL;
2255 	}
2256 
2257 	if (qp->sq.flushed) {
2258 		dev_dbg(&cq->hwq.pdev->dev,
2259 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2260 		goto done;
2261 	}
	/* We must walk the sq's swq to fabricate CQEs for all the previously
	 * signaled SWQEs that were aggregated into this CQE, from the current
	 * sq cons up to cqe_sq_cons
	 */
2266 	cqe = *pcqe;
2267 	while (*budget) {
2268 		sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2269 		if (sw_sq_cons == cqe_sq_cons)
2270 			/* Done */
2271 			break;
2272 
2273 		swq = &sq->swq[sw_sq_cons];
2274 		memset(cqe, 0, sizeof(*cqe));
2275 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2276 		cqe->qp_handle = (u64)(unsigned long)qp;
2277 		cqe->src_qp = qp->id;
2278 		cqe->wr_id = swq->wr_id;
2279 		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2280 			goto skip;
2281 		cqe->type = swq->type;
2282 
2283 		/* For the last CQE, check for status.  For errors, regardless
2284 		 * of the request being signaled or not, it must complete with
2285 		 * the hwcqe error status
2286 		 */
2287 		if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
2288 		    hwcqe->status != CQ_REQ_STATUS_OK) {
2289 			cqe->status = hwcqe->status;
2290 			dev_err(&cq->hwq.pdev->dev,
2291 				"QPLIB: FP: CQ Processed Req ");
2292 			dev_err(&cq->hwq.pdev->dev,
2293 				"QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
2294 				sw_sq_cons, cqe->wr_id, cqe->status);
2295 			cqe++;
2296 			(*budget)--;
2297 			bnxt_qplib_lock_buddy_cq(qp, cq);
2298 			bnxt_qplib_mark_qp_error(qp);
2299 			bnxt_qplib_unlock_buddy_cq(qp, cq);
2300 		} else {
2301 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2302 				/* Before we complete, do WA 9060 */
2303 				if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
2304 					      cqe_sq_cons)) {
2305 					*lib_qp = qp;
2306 					goto out;
2307 				}
2308 				cqe->status = CQ_REQ_STATUS_OK;
2309 				cqe++;
2310 				(*budget)--;
2311 			}
2312 		}
2313 skip:
2314 		sq->hwq.cons++;
2315 		if (sq->single)
2316 			break;
2317 	}
2318 out:
2319 	*pcqe = cqe;
2320 	if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
2321 		/* Out of budget */
2322 		rc = -EAGAIN;
2323 		goto done;
2324 	}
2325 	/*
2326 	 * Back to normal completion mode only after it has completed all of
2327 	 * the WC for this CQE
2328 	 */
2329 	sq->single = false;
2330 done:
2331 	return rc;
2332 }
2333 
2334 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2335 {
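	/* Append the freed SRQE tag to the tail of the SRQ free-index chain */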
2336 	spin_lock(&srq->hwq.lock);
2337 	srq->swq[srq->last_idx].next_idx = (int)tag;
2338 	srq->last_idx = (int)tag;
2339 	srq->swq[srq->last_idx].next_idx = -1;
2340 	srq->hwq.cons++; /* Support for SRQE counter */
2341 	spin_unlock(&srq->hwq.lock);
2342 }
2343 
2344 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2345 					struct cq_res_rc *hwcqe,
2346 					struct bnxt_qplib_cqe **pcqe,
2347 					int *budget)
2348 {
2349 	struct bnxt_qplib_qp *qp;
2350 	struct bnxt_qplib_q *rq;
2351 	struct bnxt_qplib_srq *srq;
2352 	struct bnxt_qplib_cqe *cqe;
2353 	u32 wr_id_idx;
2354 	int rc = 0;
2355 
2356 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2357 				      le64_to_cpu(hwcqe->qp_handle));
2358 	if (!qp) {
2359 		dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
2360 		return -EINVAL;
2361 	}
2362 	if (qp->rq.flushed) {
2363 		dev_dbg(&cq->hwq.pdev->dev,
2364 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2365 		goto done;
2366 	}
2367 
2368 	cqe = *pcqe;
2369 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2370 	cqe->length = le32_to_cpu(hwcqe->length);
2371 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2372 	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2373 	cqe->flags = le16_to_cpu(hwcqe->flags);
2374 	cqe->status = hwcqe->status;
2375 	cqe->qp_handle = (u64)(unsigned long)qp;
2376 
2377 	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2378 				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2379 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2380 		srq = qp->srq;
2381 		if (!srq)
2382 			return -EINVAL;
2383 		if (wr_id_idx > srq->hwq.max_elements) {
2384 			dev_err(&cq->hwq.pdev->dev,
2385 				"QPLIB: FP: CQ Process RC ");
2386 			dev_err(&cq->hwq.pdev->dev,
2387 				"QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
2388 				wr_id_idx, srq->hwq.max_elements);
2389 			return -EINVAL;
2390 		}
2391 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2392 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2393 		cqe++;
2394 		(*budget)--;
2395 		*pcqe = cqe;
2396 	} else {
2397 		rq = &qp->rq;
2398 		if (wr_id_idx > rq->hwq.max_elements) {
2399 			dev_err(&cq->hwq.pdev->dev,
2400 				"QPLIB: FP: CQ Process RC ");
2401 			dev_err(&cq->hwq.pdev->dev,
2402 				"QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
2403 				wr_id_idx, rq->hwq.max_elements);
2404 			return -EINVAL;
2405 		}
2406 		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2407 		cqe++;
2408 		(*budget)--;
2409 		rq->hwq.cons++;
2410 		*pcqe = cqe;
2411 
2412 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2413 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2414 			/* Add qp to flush list of the CQ */
2415 			bnxt_qplib_lock_buddy_cq(qp, cq);
2416 			__bnxt_qplib_add_flush_qp(qp);
2417 			bnxt_qplib_unlock_buddy_cq(qp, cq);
2418 		}
2419 	}
2420 
2421 done:
2422 	return rc;
2423 }
2424 
2425 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2426 					struct cq_res_ud *hwcqe,
2427 					struct bnxt_qplib_cqe **pcqe,
2428 					int *budget)
2429 {
2430 	struct bnxt_qplib_qp *qp;
2431 	struct bnxt_qplib_q *rq;
2432 	struct bnxt_qplib_srq *srq;
2433 	struct bnxt_qplib_cqe *cqe;
2434 	u32 wr_id_idx;
2435 	int rc = 0;
2436 
2437 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2438 				      le64_to_cpu(hwcqe->qp_handle));
2439 	if (!qp) {
2440 		dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
2441 		return -EINVAL;
2442 	}
2443 	if (qp->rq.flushed) {
2444 		dev_dbg(&cq->hwq.pdev->dev,
2445 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2446 		goto done;
2447 	}
2448 	cqe = *pcqe;
2449 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2450 	cqe->length = le32_to_cpu(hwcqe->length);
2451 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2452 	cqe->flags = le16_to_cpu(hwcqe->flags);
2453 	cqe->status = hwcqe->status;
2454 	cqe->qp_handle = (u64)(unsigned long)qp;
2455 	memcpy(cqe->smac, hwcqe->src_mac, 6);
2456 	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2457 				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
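	/*
	 * Reassemble the 24-bit source QP id: HW delivers the low 16 bits in
	 * src_qp_low and the high 8 bits in the top byte of the wr_id word.
	 */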
2458 	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2459 				  ((le32_to_cpu(
2460 				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
2461 				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2462 
2463 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2464 		srq = qp->srq;
2465 		if (!srq)
2466 			return -EINVAL;
2467 
2468 		if (wr_id_idx > srq->hwq.max_elements) {
2469 			dev_err(&cq->hwq.pdev->dev,
2470 				"QPLIB: FP: CQ Process UD ");
2471 			dev_err(&cq->hwq.pdev->dev,
2472 				"QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
2473 				wr_id_idx, srq->hwq.max_elements);
2474 			return -EINVAL;
2475 		}
2476 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2477 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2478 		cqe++;
2479 		(*budget)--;
2480 		*pcqe = cqe;
2481 	} else {
2482 		rq = &qp->rq;
2483 		if (wr_id_idx > rq->hwq.max_elements) {
2484 			dev_err(&cq->hwq.pdev->dev,
2485 				"QPLIB: FP: CQ Process UD ");
2486 			dev_err(&cq->hwq.pdev->dev,
2487 				"QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
2488 				wr_id_idx, rq->hwq.max_elements);
2489 			return -EINVAL;
2490 		}
2491 
2492 		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2493 		cqe++;
2494 		(*budget)--;
2495 		rq->hwq.cons++;
2496 		*pcqe = cqe;
2497 
2498 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2499 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2500 			/* Add qp to flush list of the CQ */
2501 			bnxt_qplib_lock_buddy_cq(qp, cq);
2502 			__bnxt_qplib_add_flush_qp(qp);
2503 			bnxt_qplib_unlock_buddy_cq(qp, cq);
2504 		}
2505 	}
2506 done:
2507 	return rc;
2508 }
2509 
2510 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2511 {
2512 	struct cq_base *hw_cqe, **hw_cqe_ptr;
2513 	unsigned long flags;
2514 	u32 sw_cons, raw_cons;
2515 	bool rc = true;
2516 
2517 	spin_lock_irqsave(&cq->hwq.lock, flags);
2518 	raw_cons = cq->hwq.cons;
2519 	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2520 	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2521 	hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2522 
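	/*
	 * CQE validity is a toggle scheme: the valid bit flips on every full
	 * pass over the ring, so it is judged against the raw (unwrapped)
	 * consumer count rather than by comparing head and tail.
	 */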
	/* Check for the Valid bit; if the CQE is valid, return false */
2524 	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2525 	spin_unlock_irqrestore(&cq->hwq.lock, flags);
2526 	return rc;
2527 }
2528 
2529 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2530 						struct cq_res_raweth_qp1 *hwcqe,
2531 						struct bnxt_qplib_cqe **pcqe,
2532 						int *budget)
2533 {
2534 	struct bnxt_qplib_qp *qp;
2535 	struct bnxt_qplib_q *rq;
2536 	struct bnxt_qplib_srq *srq;
2537 	struct bnxt_qplib_cqe *cqe;
2538 	u32 wr_id_idx;
2539 	int rc = 0;
2540 
2541 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2542 				      le64_to_cpu(hwcqe->qp_handle));
2543 	if (!qp) {
2544 		dev_err(&cq->hwq.pdev->dev,
2545 			"QPLIB: process_cq Raw/QP1 qp is NULL");
2546 		return -EINVAL;
2547 	}
2548 	if (qp->rq.flushed) {
2549 		dev_dbg(&cq->hwq.pdev->dev,
2550 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2551 		goto done;
2552 	}
2553 	cqe = *pcqe;
2554 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2555 	cqe->flags = le16_to_cpu(hwcqe->flags);
2556 	cqe->qp_handle = (u64)(unsigned long)qp;
2557 
2558 	wr_id_idx =
2559 		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2560 				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2561 	cqe->src_qp = qp->id;
2562 	if (qp->id == 1 && !cqe->length) {
		/* Workaround: HW may misreport the length as zero for QP1 */
2564 		cqe->length = 296;
2565 	} else {
2566 		cqe->length = le16_to_cpu(hwcqe->length);
2567 	}
2568 	cqe->pkey_index = qp->pkey_index;
2569 	memcpy(cqe->smac, qp->smac, 6);
2570 
2571 	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2572 	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2573 	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2574 
2575 	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2576 		srq = qp->srq;
2577 		if (!srq) {
2578 			dev_err(&cq->hwq.pdev->dev,
2579 				"QPLIB: FP: SRQ used but not defined??");
2580 			return -EINVAL;
2581 		}
2582 		if (wr_id_idx > srq->hwq.max_elements) {
2583 			dev_err(&cq->hwq.pdev->dev,
2584 				"QPLIB: FP: CQ Process Raw/QP1 ");
2585 			dev_err(&cq->hwq.pdev->dev,
2586 				"QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
2587 				wr_id_idx, srq->hwq.max_elements);
2588 			return -EINVAL;
2589 		}
2590 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2591 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2592 		cqe++;
2593 		(*budget)--;
2594 		*pcqe = cqe;
2595 	} else {
2596 		rq = &qp->rq;
2597 		if (wr_id_idx > rq->hwq.max_elements) {
2598 			dev_err(&cq->hwq.pdev->dev,
2599 				"QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
2600 			dev_err(&cq->hwq.pdev->dev,
2601 				"QPLIB: ix 0x%x exceeded RQ max 0x%x",
2602 				wr_id_idx, rq->hwq.max_elements);
2603 			return -EINVAL;
2604 		}
2605 		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2606 		cqe++;
2607 		(*budget)--;
2608 		rq->hwq.cons++;
2609 		*pcqe = cqe;
2610 
2611 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2612 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2613 			/* Add qp to flush list of the CQ */
2614 			bnxt_qplib_lock_buddy_cq(qp, cq);
2615 			__bnxt_qplib_add_flush_qp(qp);
2616 			bnxt_qplib_unlock_buddy_cq(qp, cq);
2617 		}
2618 	}
2619 
2620 done:
2621 	return rc;
2622 }
2623 
2624 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2625 					  struct cq_terminal *hwcqe,
2626 					  struct bnxt_qplib_cqe **pcqe,
2627 					  int *budget)
2628 {
2629 	struct bnxt_qplib_qp *qp;
2630 	struct bnxt_qplib_q *sq, *rq;
2631 	struct bnxt_qplib_cqe *cqe;
2632 	u32 sw_cons = 0, cqe_cons;
2633 	int rc = 0;
2634 
2635 	/* Check the Status */
2636 	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2637 		dev_warn(&cq->hwq.pdev->dev,
2638 			 "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
2639 			 hwcqe->status);
2640 
2641 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2642 				      le64_to_cpu(hwcqe->qp_handle));
2643 	if (!qp) {
2644 		dev_err(&cq->hwq.pdev->dev,
2645 			"QPLIB: FP: CQ Process terminal qp is NULL");
2646 		return -EINVAL;
2647 	}
2648 
2649 	/* Must block new posting of SQ and RQ */
2650 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2651 
2652 	sq = &qp->sq;
2653 	rq = &qp->rq;
2654 
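	/* A cons index of 0xFFFF is the HW sentinel for "no index reported" */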
2655 	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2656 	if (cqe_cons == 0xFFFF)
2657 		goto do_rq;
2658 
2659 	if (cqe_cons > sq->hwq.max_elements) {
2660 		dev_err(&cq->hwq.pdev->dev,
2661 			"QPLIB: FP: CQ Process terminal reported ");
2662 		dev_err(&cq->hwq.pdev->dev,
2663 			"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
2664 			cqe_cons, sq->hwq.max_elements);
2665 		goto do_rq;
2666 	}
2667 
2668 	if (qp->sq.flushed) {
2669 		dev_dbg(&cq->hwq.pdev->dev,
2670 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2671 		goto sq_done;
2672 	}
2673 
	/* A terminal CQE can also subsume successful CQEs that were aggregated
	 * before it, so all CQEs from the current sq cons up to cqe_cons must
	 * be completed with status OK
	 */
2678 	cqe = *pcqe;
2679 	while (*budget) {
2680 		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2681 		if (sw_cons == cqe_cons)
2682 			break;
2683 		if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2684 			memset(cqe, 0, sizeof(*cqe));
2685 			cqe->status = CQ_REQ_STATUS_OK;
2686 			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2687 			cqe->qp_handle = (u64)(unsigned long)qp;
2688 			cqe->src_qp = qp->id;
2689 			cqe->wr_id = sq->swq[sw_cons].wr_id;
2690 			cqe->type = sq->swq[sw_cons].type;
2691 			cqe++;
2692 			(*budget)--;
2693 		}
2694 		sq->hwq.cons++;
2695 	}
2696 	*pcqe = cqe;
2697 	if (!(*budget) && sw_cons != cqe_cons) {
2698 		/* Out of budget */
2699 		rc = -EAGAIN;
2700 		goto sq_done;
2701 	}
2702 sq_done:
2703 	if (rc)
2704 		return rc;
2705 do_rq:
2706 	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2707 	if (cqe_cons == 0xFFFF) {
2708 		goto done;
2709 	} else if (cqe_cons > rq->hwq.max_elements) {
2710 		dev_err(&cq->hwq.pdev->dev,
2711 			"QPLIB: FP: CQ Processed terminal ");
2712 		dev_err(&cq->hwq.pdev->dev,
2713 			"QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x",
2714 			cqe_cons, rq->hwq.max_elements);
2715 		goto done;
2716 	}
2717 
2718 	if (qp->rq.flushed) {
2719 		dev_dbg(&cq->hwq.pdev->dev,
2720 			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2721 		rc = 0;
2722 		goto done;
2723 	}
2724 
	/* A terminal CQE requires all posted RQEs, from the current rq->cons
	 * up to rq->prod, to complete with FLUSHED_ERR regardless of the
	 * rq_cons_idx the terminal CQE indicates
	 */
2729 
2730 	/* Add qp to flush list of the CQ */
2731 	bnxt_qplib_lock_buddy_cq(qp, cq);
2732 	__bnxt_qplib_add_flush_qp(qp);
2733 	bnxt_qplib_unlock_buddy_cq(qp, cq);
2734 done:
2735 	return rc;
2736 }
2737 
2738 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2739 					struct cq_cutoff *hwcqe)
2740 {
2741 	/* Check the Status */
2742 	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2743 		dev_err(&cq->hwq.pdev->dev,
2744 			"QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
2745 			hwcqe->status);
2746 		return -EINVAL;
2747 	}
2748 	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2749 	wake_up_interruptible(&cq->waitq);
2750 
2751 	return 0;
2752 }
2753 
2754 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2755 				  struct bnxt_qplib_cqe *cqe,
2756 				  int num_cqes)
2757 {
2758 	struct bnxt_qplib_qp *qp = NULL;
2759 	u32 budget = num_cqes;
2760 	unsigned long flags;
2761 
2762 	spin_lock_irqsave(&cq->hwq.lock, flags);
2763 	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2764 		dev_dbg(&cq->hwq.pdev->dev,
2765 			"QPLIB: FP: Flushing SQ QP= %p",
2766 			qp);
2767 		__flush_sq(&qp->sq, qp, &cqe, &budget);
2768 	}
2769 
2770 	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2771 		dev_dbg(&cq->hwq.pdev->dev,
2772 			"QPLIB: FP: Flushing RQ QP= %p",
2773 			qp);
2774 		__flush_rq(&qp->rq, qp, &cqe, &budget);
2775 	}
2776 	spin_unlock_irqrestore(&cq->hwq.lock, flags);
2777 
2778 	return num_cqes - budget;
2779 }
2780 
2781 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2782 		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
2783 {
2784 	struct cq_base *hw_cqe, **hw_cqe_ptr;
2785 	unsigned long flags;
2786 	u32 sw_cons, raw_cons;
2787 	int budget, rc = 0;
2788 
2789 	spin_lock_irqsave(&cq->hwq.lock, flags);
2790 	raw_cons = cq->hwq.cons;
2791 	budget = num_cqes;
2792 
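	/*
	 * Consume ring entries until the ring runs dry or the caller's
	 * budget of CQE slots is exhausted; each process helper decrements
	 * the budget for every qplib CQE it emits.
	 */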
2793 	while (budget) {
2794 		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2795 		hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2796 		hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2797 
2798 		/* Check for Valid bit */
2799 		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2800 			break;
2801 
2802 		/*
2803 		 * The valid test of the entry must be done first before
2804 		 * reading any further.
2805 		 */
2806 		dma_rmb();
		/* Convert the device's CQE format into a qplib_wc */
2808 		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
2809 		case CQ_BASE_CQE_TYPE_REQ:
2810 			rc = bnxt_qplib_cq_process_req(cq,
2811 						       (struct cq_req *)hw_cqe,
2812 						       &cqe, &budget,
2813 						       sw_cons, lib_qp);
2814 			break;
2815 		case CQ_BASE_CQE_TYPE_RES_RC:
2816 			rc = bnxt_qplib_cq_process_res_rc(cq,
2817 							  (struct cq_res_rc *)
2818 							  hw_cqe, &cqe,
2819 							  &budget);
2820 			break;
2821 		case CQ_BASE_CQE_TYPE_RES_UD:
2822 			rc = bnxt_qplib_cq_process_res_ud
2823 					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
2824 					 &budget);
2825 			break;
2826 		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2827 			rc = bnxt_qplib_cq_process_res_raweth_qp1
2828 					(cq, (struct cq_res_raweth_qp1 *)
2829 					 hw_cqe, &cqe, &budget);
2830 			break;
2831 		case CQ_BASE_CQE_TYPE_TERMINAL:
2832 			rc = bnxt_qplib_cq_process_terminal
2833 					(cq, (struct cq_terminal *)hw_cqe,
2834 					 &cqe, &budget);
2835 			break;
2836 		case CQ_BASE_CQE_TYPE_CUT_OFF:
2837 			bnxt_qplib_cq_process_cutoff
2838 					(cq, (struct cq_cutoff *)hw_cqe);
2839 			/* Done processing this CQ */
2840 			goto exit;
2841 		default:
2842 			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: process_cq unknown type 0x%x",
2844 				hw_cqe->cqe_type_toggle &
2845 				CQ_BASE_CQE_TYPE_MASK);
2846 			rc = -EINVAL;
2847 			break;
2848 		}
2849 		if (rc < 0) {
2850 			if (rc == -EAGAIN)
2851 				break;
2852 			/* Error while processing the CQE, just skip to the
2853 			 * next one
2854 			 */
2855 			dev_err(&cq->hwq.pdev->dev,
2856 				"QPLIB: process_cqe error rc = 0x%x", rc);
2857 		}
2858 		raw_cons++;
2859 	}
2860 	if (cq->hwq.cons != raw_cons) {
2861 		cq->hwq.cons = raw_cons;
2862 		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
2863 	}
2864 exit:
2865 	spin_unlock_irqrestore(&cq->hwq.lock, flags);
2866 	return num_cqes - budget;
2867 }
2868 
2869 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2870 {
2871 	unsigned long flags;
2872 
2873 	spin_lock_irqsave(&cq->hwq.lock, flags);
2874 	if (arm_type)
2875 		bnxt_qplib_arm_cq(cq, arm_type);
	/* Use cq->arm_state to track whether the cq handler must be invoked */
2877 	atomic_set(&cq->arm_state, 1);
2878 	spin_unlock_irqrestore(&cq->hwq.lock, flags);
2879 }
2880 
2881 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
2882 {
2883 	flush_workqueue(qp->scq->nq->cqn_wq);
2884 	if (qp->scq != qp->rcq)
2885 		flush_workqueue(qp->rcq->nq->cqn_wq);
2886 }
2887