1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: Fast Path Operators
37  */
38 
39 #define dev_fmt(fmt) "QPLIB: " fmt
40 
41 #include <linux/interrupt.h>
42 #include <linux/spinlock.h>
43 #include <linux/sched.h>
44 #include <linux/slab.h>
45 #include <linux/pci.h>
46 #include <linux/prefetch.h>
47 
48 #include "roce_hsi.h"
49 
50 #include "qplib_res.h"
51 #include "qplib_rcfw.h"
52 #include "qplib_sp.h"
53 #include "qplib_fp.h"
54 
55 static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
56 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
57 static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type);
58 
59 static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
60 {
61 	qp->sq.condition = false;
62 	qp->sq.send_phantom = false;
63 	qp->sq.single = false;
64 }
65 
66 /* Flush list */
67 static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
68 {
69 	struct bnxt_qplib_cq *scq, *rcq;
70 
71 	scq = qp->scq;
72 	rcq = qp->rcq;
73 
74 	if (!qp->sq.flushed) {
75 		dev_dbg(&scq->hwq.pdev->dev,
76 			"FP: Adding to SQ Flush list = %p\n", qp);
77 		bnxt_qplib_cancel_phantom_processing(qp);
78 		list_add_tail(&qp->sq_flush, &scq->sqf_head);
79 		qp->sq.flushed = true;
80 	}
81 	if (!qp->srq) {
82 		if (!qp->rq.flushed) {
83 			dev_dbg(&rcq->hwq.pdev->dev,
84 				"FP: Adding to RQ Flush list = %p\n", qp);
85 			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
86 			qp->rq.flushed = true;
87 		}
88 	}
89 }
90 
91 static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
92 				       unsigned long *flags)
93 	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
94 {
95 	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
96 	if (qp->scq == qp->rcq)
97 		__acquire(&qp->rcq->flush_lock);
98 	else
99 		spin_lock(&qp->rcq->flush_lock);
100 }
101 
102 static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
103 				       unsigned long *flags)
104 	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
105 {
106 	if (qp->scq == qp->rcq)
107 		__release(&qp->rcq->flush_lock);
108 	else
109 		spin_unlock(&qp->rcq->flush_lock);
110 	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
111 }
112 
113 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
114 {
115 	unsigned long flags;
116 
117 	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
118 	__bnxt_qplib_add_flush_qp(qp);
119 	bnxt_qplib_release_cq_flush_locks(qp, &flags);
120 }
121 
122 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
123 {
124 	if (qp->sq.flushed) {
125 		qp->sq.flushed = false;
126 		list_del(&qp->sq_flush);
127 	}
128 	if (!qp->srq) {
129 		if (qp->rq.flushed) {
130 			qp->rq.flushed = false;
131 			list_del(&qp->rq_flush);
132 		}
133 	}
134 }
135 
136 void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
137 {
138 	unsigned long flags;
139 
140 	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
141 	__clean_cq(qp->scq, (u64)(unsigned long)qp);
142 	qp->sq.hwq.prod = 0;
143 	qp->sq.hwq.cons = 0;
144 	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
145 	qp->rq.hwq.prod = 0;
146 	qp->rq.hwq.cons = 0;
147 
148 	__bnxt_qplib_del_flush_qp(qp);
149 	bnxt_qplib_release_cq_flush_locks(qp, &flags);
150 }
151 
152 static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
153 {
154 	struct bnxt_qplib_nq_work *nq_work =
155 			container_of(work, struct bnxt_qplib_nq_work, work);
156 
157 	struct bnxt_qplib_cq *cq = nq_work->cq;
158 	struct bnxt_qplib_nq *nq = nq_work->nq;
159 
160 	if (cq && nq) {
161 		spin_lock_bh(&cq->compl_lock);
162 		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
163 			dev_dbg(&nq->pdev->dev,
164 				"%s:Trigger cq  = %p event nq = %p\n",
165 				__func__, cq, nq);
166 			nq->cqn_handler(nq, cq);
167 		}
168 		spin_unlock_bh(&cq->compl_lock);
169 	}
170 	kfree(nq_work);
171 }
172 
173 static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
174 				       struct bnxt_qplib_qp *qp)
175 {
176 	struct bnxt_qplib_q *rq = &qp->rq;
177 	struct bnxt_qplib_q *sq = &qp->sq;
178 
179 	if (qp->rq_hdr_buf)
180 		dma_free_coherent(&res->pdev->dev,
181 				  rq->hwq.max_elements * qp->rq_hdr_buf_size,
182 				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
183 	if (qp->sq_hdr_buf)
184 		dma_free_coherent(&res->pdev->dev,
185 				  sq->hwq.max_elements * qp->sq_hdr_buf_size,
186 				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
187 	qp->rq_hdr_buf = NULL;
188 	qp->sq_hdr_buf = NULL;
189 	qp->rq_hdr_buf_map = 0;
190 	qp->sq_hdr_buf_map = 0;
191 	qp->sq_hdr_buf_size = 0;
192 	qp->rq_hdr_buf_size = 0;
193 }
194 
195 static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
196 				       struct bnxt_qplib_qp *qp)
197 {
198 	struct bnxt_qplib_q *rq = &qp->rq;
199 	struct bnxt_qplib_q *sq = &qp->sq;
200 	int rc = 0;
201 
202 	if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
203 		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
204 					sq->hwq.max_elements *
205 					qp->sq_hdr_buf_size,
206 					&qp->sq_hdr_buf_map, GFP_KERNEL);
207 		if (!qp->sq_hdr_buf) {
208 			rc = -ENOMEM;
209 			dev_err(&res->pdev->dev,
210 				"Failed to create sq_hdr_buf\n");
211 			goto fail;
212 		}
213 	}
214 
215 	if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
216 		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
217 						    rq->hwq.max_elements *
218 						    qp->rq_hdr_buf_size,
219 						    &qp->rq_hdr_buf_map,
220 						    GFP_KERNEL);
221 		if (!qp->rq_hdr_buf) {
222 			rc = -ENOMEM;
223 			dev_err(&res->pdev->dev,
224 				"Failed to create rq_hdr_buf\n");
225 			goto fail;
226 		}
227 	}
228 	return 0;
229 
230 fail:
231 	bnxt_qplib_free_qp_hdr_buf(res, qp);
232 	return rc;
233 }
234 
235 static void bnxt_qplib_service_nq(unsigned long data)
236 {
237 	struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
238 	struct bnxt_qplib_hwq *hwq = &nq->hwq;
239 	struct nq_base *nqe, **nq_ptr;
240 	struct bnxt_qplib_cq *cq;
241 	int num_cqne_processed = 0;
242 	int num_srqne_processed = 0;
243 	u32 sw_cons, raw_cons;
244 	u16 type;
245 	int budget = nq->budget;
246 	uintptr_t q_handle;
247 
248 	/* Service the NQ until empty */
249 	raw_cons = hwq->cons;
250 	while (budget--) {
251 		sw_cons = HWQ_CMP(raw_cons, hwq);
252 		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
253 		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
254 		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
255 			break;
256 
257 		/*
258 		 * The valid test of the entry must be done first before
259 		 * reading any further.
260 		 */
261 		dma_rmb();
262 
263 		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
264 		switch (type) {
265 		case NQ_BASE_TYPE_CQ_NOTIFICATION:
266 		{
267 			struct nq_cn *nqcne = (struct nq_cn *)nqe;
268 
269 			q_handle = le32_to_cpu(nqcne->cq_handle_low);
270 			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
271 						     << 32;
272 			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
273 			bnxt_qplib_arm_cq_enable(cq);
274 			spin_lock_bh(&cq->compl_lock);
275 			atomic_set(&cq->arm_state, 0);
276 			if (!nq->cqn_handler(nq, (cq)))
277 				num_cqne_processed++;
278 			else
279 				dev_warn(&nq->pdev->dev,
280 					 "cqn - type 0x%x not handled\n", type);
281 			spin_unlock_bh(&cq->compl_lock);
282 			break;
283 		}
284 		case NQ_BASE_TYPE_SRQ_EVENT:
285 		{
286 			struct nq_srq_event *nqsrqe =
287 						(struct nq_srq_event *)nqe;
288 
289 			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
290 			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
291 				     << 32;
292 			bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle,
293 					   DBR_DBR_TYPE_SRQ_ARMENA);
294 			if (!nq->srqn_handler(nq,
295 					      (struct bnxt_qplib_srq *)q_handle,
296 					      nqsrqe->event))
297 				num_srqne_processed++;
298 			else
299 				dev_warn(&nq->pdev->dev,
300 					 "SRQ event 0x%x not handled\n",
301 					 nqsrqe->event);
302 			break;
303 		}
304 		case NQ_BASE_TYPE_DBQ_EVENT:
305 			break;
306 		default:
307 			dev_warn(&nq->pdev->dev,
308 				 "nqe with type = 0x%x not handled\n", type);
309 			break;
310 		}
311 		raw_cons++;
312 	}
313 	if (hwq->cons != raw_cons) {
314 		hwq->cons = raw_cons;
315 		NQ_DB_REARM(nq->bar_reg_iomem, hwq->cons, hwq->max_elements);
316 	}
317 }
318 
319 static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
320 {
321 	struct bnxt_qplib_nq *nq = dev_instance;
322 	struct bnxt_qplib_hwq *hwq = &nq->hwq;
323 	struct nq_base **nq_ptr;
324 	u32 sw_cons;
325 
326 	/* Prefetch the NQ element */
327 	sw_cons = HWQ_CMP(hwq->cons, hwq);
328 	nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
329 	prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
330 
331 	/* Fan out to CPU affinitized kthreads? */
332 	tasklet_schedule(&nq->worker);
333 
334 	return IRQ_HANDLED;
335 }
336 
337 void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
338 {
339 	tasklet_disable(&nq->worker);
340 	/* Mask h/w interrupt */
341 	NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
342 	/* Sync with last running IRQ handler */
343 	synchronize_irq(nq->vector);
344 	if (kill)
345 		tasklet_kill(&nq->worker);
346 	if (nq->requested) {
347 		irq_set_affinity_hint(nq->vector, NULL);
348 		free_irq(nq->vector, nq);
349 		nq->requested = false;
350 	}
351 }
352 
353 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
354 {
355 	if (nq->cqn_wq) {
356 		destroy_workqueue(nq->cqn_wq);
357 		nq->cqn_wq = NULL;
358 	}
359 
360 	/* Make sure the HW is stopped! */
361 	if (nq->requested)
362 		bnxt_qplib_nq_stop_irq(nq, true);
363 
364 	if (nq->bar_reg_iomem)
365 		iounmap(nq->bar_reg_iomem);
366 	nq->bar_reg_iomem = NULL;
367 
368 	nq->cqn_handler = NULL;
369 	nq->srqn_handler = NULL;
370 	nq->vector = 0;
371 }
372 
373 int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
374 			    int msix_vector, bool need_init)
375 {
376 	int rc;
377 
378 	if (nq->requested)
379 		return -EFAULT;
380 
381 	nq->vector = msix_vector;
382 	if (need_init)
383 		tasklet_init(&nq->worker, bnxt_qplib_service_nq,
384 			     (unsigned long)nq);
385 	else
386 		tasklet_enable(&nq->worker);
387 
388 	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
389 	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
390 	if (rc)
391 		return rc;
392 
393 	cpumask_clear(&nq->mask);
394 	cpumask_set_cpu(nq_indx, &nq->mask);
395 	rc = irq_set_affinity_hint(nq->vector, &nq->mask);
396 	if (rc) {
397 		dev_warn(&nq->pdev->dev,
398 			 "set affinity failed; vector: %d nq_idx: %d\n",
399 			 nq->vector, nq_indx);
400 	}
401 	nq->requested = true;
402 	NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
403 
404 	return rc;
405 }
406 
407 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
408 			 int nq_idx, int msix_vector, int bar_reg_offset,
409 			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
410 					    struct bnxt_qplib_cq *),
411 			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
412 					     struct bnxt_qplib_srq *,
413 					     u8 event))
414 {
415 	resource_size_t nq_base;
416 	int rc = -1;
417 
418 	if (cqn_handler)
419 		nq->cqn_handler = cqn_handler;
420 
421 	if (srqn_handler)
422 		nq->srqn_handler = srqn_handler;
423 
424 	/* Have a task to schedule CQ notifiers in post send case */
425 	nq->cqn_wq  = create_singlethread_workqueue("bnxt_qplib_nq");
426 	if (!nq->cqn_wq)
427 		return -ENOMEM;
428 
429 	nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
430 	nq->bar_reg_off = bar_reg_offset;
431 	nq_base = pci_resource_start(pdev, nq->bar_reg);
432 	if (!nq_base) {
433 		rc = -ENOMEM;
434 		goto fail;
435 	}
436 	nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 4);
437 	if (!nq->bar_reg_iomem) {
438 		rc = -ENOMEM;
439 		goto fail;
440 	}
441 
442 	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
443 	if (rc) {
444 		dev_err(&nq->pdev->dev,
445 			"Failed to request irq for nq-idx %d\n", nq_idx);
446 		goto fail;
447 	}
448 
449 	return 0;
450 fail:
451 	bnxt_qplib_disable_nq(nq);
452 	return rc;
453 }
454 
455 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
456 {
457 	if (nq->hwq.max_elements) {
458 		bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
459 		nq->hwq.max_elements = 0;
460 	}
461 }
462 
463 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
464 {
465 	nq->pdev = pdev;
466 	if (!nq->hwq.max_elements ||
467 	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
468 		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
469 
470 	if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
471 				      &nq->hwq.max_elements,
472 				      BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
473 				      PAGE_SIZE, HWQ_TYPE_L2_CMPL))
474 		return -ENOMEM;
475 
476 	nq->budget = 8;
477 	return 0;
478 }
479 
480 /* SRQ */
481 static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
482 {
483 	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
484 	struct dbr_dbr db_msg = { 0 };
485 	void __iomem *db;
486 	u32 sw_prod = 0;
487 
488 	/* Ring DB */
489 	sw_prod = (arm_type == DBR_DBR_TYPE_SRQ_ARM) ? srq->threshold :
490 		   HWQ_CMP(srq_hwq->prod, srq_hwq);
491 	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
492 				   DBR_DBR_INDEX_MASK);
493 	db_msg.type_xid = cpu_to_le32(((srq->id << DBR_DBR_XID_SFT) &
494 					DBR_DBR_XID_MASK) | arm_type);
495 	db = (arm_type == DBR_DBR_TYPE_SRQ_ARMENA) ?
496 		srq->dbr_base : srq->dpi->dbr;
497 	wmb(); /* barrier before db ring */
498 	__iowrite64_copy(db, &db_msg, sizeof(db_msg) / sizeof(u64));
499 }
500 
501 int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
502 			   struct bnxt_qplib_srq *srq)
503 {
504 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
505 	struct cmdq_destroy_srq req;
506 	struct creq_destroy_srq_resp resp;
507 	u16 cmd_flags = 0;
508 	int rc;
509 
510 	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);
511 
512 	/* Configure the request */
513 	req.srq_cid = cpu_to_le32(srq->id);
514 
515 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
516 					  (void *)&resp, NULL, 0);
517 	if (rc)
518 		return rc;
519 
520 	bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
521 	kfree(srq->swq);
522 	return 0;
523 }
524 
525 int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
526 			  struct bnxt_qplib_srq *srq)
527 {
528 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
529 	struct cmdq_create_srq req;
530 	struct creq_create_srq_resp resp;
531 	struct bnxt_qplib_pbl *pbl;
532 	u16 cmd_flags = 0;
533 	int rc, idx;
534 
535 	srq->hwq.max_elements = srq->max_wqe;
536 	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, srq->sglist,
537 				       srq->nmap, &srq->hwq.max_elements,
538 				       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
539 				       PAGE_SIZE, HWQ_TYPE_QUEUE);
540 	if (rc)
541 		goto exit;
542 
543 	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
544 			   GFP_KERNEL);
545 	if (!srq->swq) {
546 		rc = -ENOMEM;
547 		goto fail;
548 	}
549 
550 	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);
551 
552 	/* Configure the request */
553 	req.dpi = cpu_to_le32(srq->dpi->dpi);
554 	req.srq_handle = cpu_to_le64((uintptr_t)srq);
555 
556 	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
557 	pbl = &srq->hwq.pbl[PBL_LVL_0];
558 	req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
559 				      CMDQ_CREATE_SRQ_LVL_MASK) <<
560 				      CMDQ_CREATE_SRQ_LVL_SFT) |
561 				      (pbl->pg_size == ROCE_PG_SIZE_4K ?
562 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
563 				       pbl->pg_size == ROCE_PG_SIZE_8K ?
564 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
565 				       pbl->pg_size == ROCE_PG_SIZE_64K ?
566 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
567 				       pbl->pg_size == ROCE_PG_SIZE_2M ?
568 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
569 				       pbl->pg_size == ROCE_PG_SIZE_8M ?
570 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
571 				       pbl->pg_size == ROCE_PG_SIZE_1G ?
572 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
573 				       CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
574 	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
575 	req.pd_id = cpu_to_le32(srq->pd->id);
576 	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
577 
578 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
579 					  (void *)&resp, NULL, 0);
580 	if (rc)
581 		goto fail;
582 
583 	spin_lock_init(&srq->lock);
584 	srq->start_idx = 0;
585 	srq->last_idx = srq->hwq.max_elements - 1;
586 	for (idx = 0; idx < srq->hwq.max_elements; idx++)
587 		srq->swq[idx].next_idx = idx + 1;
588 	srq->swq[srq->last_idx].next_idx = -1;
589 
590 	srq->id = le32_to_cpu(resp.xid);
591 	srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
592 	if (srq->threshold)
593 		bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARMENA);
594 	srq->arm_req = false;
595 
596 	return 0;
597 fail:
598 	bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
599 	kfree(srq->swq);
600 exit:
601 	return rc;
602 }
603 
604 int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
605 			  struct bnxt_qplib_srq *srq)
606 {
607 	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
608 	u32 sw_prod, sw_cons, count = 0;
609 
610 	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
611 	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
612 
613 	count = sw_prod > sw_cons ? sw_prod - sw_cons :
614 				    srq_hwq->max_elements - sw_cons + sw_prod;
615 	if (count > srq->threshold) {
616 		srq->arm_req = false;
617 		bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
618 	} else {
619 		/* Deferred arming */
620 		srq->arm_req = true;
621 	}
622 
623 	return 0;
624 }
625 
626 int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
627 			 struct bnxt_qplib_srq *srq)
628 {
629 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
630 	struct cmdq_query_srq req;
631 	struct creq_query_srq_resp resp;
632 	struct bnxt_qplib_rcfw_sbuf *sbuf;
633 	struct creq_query_srq_resp_sb *sb;
634 	u16 cmd_flags = 0;
635 	int rc = 0;
636 
637 	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
638 	req.srq_cid = cpu_to_le32(srq->id);
639 
640 	/* Configure the request */
641 	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
642 	if (!sbuf)
643 		return -ENOMEM;
644 	sb = sbuf->sb;
645 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
646 					  (void *)sbuf, 0);
647 	srq->threshold = le16_to_cpu(sb->srq_limit);
648 	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
649 
650 	return rc;
651 }
652 
653 int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
654 			     struct bnxt_qplib_swqe *wqe)
655 {
656 	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
657 	struct rq_wqe *srqe, **srqe_ptr;
658 	struct sq_sge *hw_sge;
659 	u32 sw_prod, sw_cons, count = 0;
660 	int i, rc = 0, next;
661 
662 	spin_lock(&srq_hwq->lock);
663 	if (srq->start_idx == srq->last_idx) {
664 		dev_err(&srq_hwq->pdev->dev,
665 			"FP: SRQ (0x%x) is full!\n", srq->id);
666 		rc = -EINVAL;
667 		spin_unlock(&srq_hwq->lock);
668 		goto done;
669 	}
670 	next = srq->start_idx;
671 	srq->start_idx = srq->swq[next].next_idx;
672 	spin_unlock(&srq_hwq->lock);
673 
674 	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
675 	srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
676 	srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
677 	memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
678 	/* Calculate wqe_size16 and data_len */
679 	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
680 	     i < wqe->num_sge; i++, hw_sge++) {
681 		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
682 		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
683 		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
684 	}
685 	srqe->wqe_type = wqe->type;
686 	srqe->flags = wqe->flags;
687 	srqe->wqe_size = wqe->num_sge +
688 			((offsetof(typeof(*srqe), data) + 15) >> 4);
689 	srqe->wr_id[0] = cpu_to_le32((u32)next);
690 	srq->swq[next].wr_id = wqe->wr_id;
691 
692 	srq_hwq->prod++;
693 
694 	spin_lock(&srq_hwq->lock);
695 	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
696 	/* retaining srq_hwq->cons for this logic
697 	 * actually the lock is only required to
698 	 * read srq_hwq->cons.
699 	 */
700 	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
701 	count = sw_prod > sw_cons ? sw_prod - sw_cons :
702 				    srq_hwq->max_elements - sw_cons + sw_prod;
703 	spin_unlock(&srq_hwq->lock);
704 	/* Ring DB */
705 	bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ);
706 	if (srq->arm_req == true && count > srq->threshold) {
707 		srq->arm_req = false;
708 		bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
709 	}
710 done:
711 	return rc;
712 }
713 
714 /* QP */
715 int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
716 {
717 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
718 	struct cmdq_create_qp1 req;
719 	struct creq_create_qp1_resp resp;
720 	struct bnxt_qplib_pbl *pbl;
721 	struct bnxt_qplib_q *sq = &qp->sq;
722 	struct bnxt_qplib_q *rq = &qp->rq;
723 	int rc;
724 	u16 cmd_flags = 0;
725 	u32 qp_flags = 0;
726 
727 	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
728 
729 	/* General */
730 	req.type = qp->type;
731 	req.dpi = cpu_to_le32(qp->dpi->dpi);
732 	req.qp_handle = cpu_to_le64(qp->qp_handle);
733 
734 	/* SQ */
735 	sq->hwq.max_elements = sq->max_wqe;
736 	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
737 				       &sq->hwq.max_elements,
738 				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
739 				       PAGE_SIZE, HWQ_TYPE_QUEUE);
740 	if (rc)
741 		goto exit;
742 
743 	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
744 	if (!sq->swq) {
745 		rc = -ENOMEM;
746 		goto fail_sq;
747 	}
748 	pbl = &sq->hwq.pbl[PBL_LVL_0];
749 	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
750 	req.sq_pg_size_sq_lvl =
751 		((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
752 				<<  CMDQ_CREATE_QP1_SQ_LVL_SFT) |
753 		(pbl->pg_size == ROCE_PG_SIZE_4K ?
754 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
755 		 pbl->pg_size == ROCE_PG_SIZE_8K ?
756 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
757 		 pbl->pg_size == ROCE_PG_SIZE_64K ?
758 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
759 		 pbl->pg_size == ROCE_PG_SIZE_2M ?
760 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
761 		 pbl->pg_size == ROCE_PG_SIZE_8M ?
762 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
763 		 pbl->pg_size == ROCE_PG_SIZE_1G ?
764 				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
765 		 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);
766 
767 	if (qp->scq)
768 		req.scq_cid = cpu_to_le32(qp->scq->id);
769 
770 	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
771 
772 	/* RQ */
773 	if (rq->max_wqe) {
774 		rq->hwq.max_elements = qp->rq.max_wqe;
775 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
776 					       &rq->hwq.max_elements,
777 					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
778 					       PAGE_SIZE, HWQ_TYPE_QUEUE);
779 		if (rc)
780 			goto fail_sq;
781 
782 		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
783 				  GFP_KERNEL);
784 		if (!rq->swq) {
785 			rc = -ENOMEM;
786 			goto fail_rq;
787 		}
788 		pbl = &rq->hwq.pbl[PBL_LVL_0];
789 		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
790 		req.rq_pg_size_rq_lvl =
791 			((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
792 			 CMDQ_CREATE_QP1_RQ_LVL_SFT) |
793 				(pbl->pg_size == ROCE_PG_SIZE_4K ?
794 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
795 				 pbl->pg_size == ROCE_PG_SIZE_8K ?
796 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
797 				 pbl->pg_size == ROCE_PG_SIZE_64K ?
798 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
799 				 pbl->pg_size == ROCE_PG_SIZE_2M ?
800 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
801 				 pbl->pg_size == ROCE_PG_SIZE_8M ?
802 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
803 				 pbl->pg_size == ROCE_PG_SIZE_1G ?
804 					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
805 				 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
806 		if (qp->rcq)
807 			req.rcq_cid = cpu_to_le32(qp->rcq->id);
808 	}
809 
810 	/* Header buffer - allow hdr_buf pass in */
811 	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
812 	if (rc) {
813 		rc = -ENOMEM;
814 		goto fail;
815 	}
816 	req.qp_flags = cpu_to_le32(qp_flags);
817 	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
818 	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
819 
820 	req.sq_fwo_sq_sge =
821 		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
822 			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
823 	req.rq_fwo_rq_sge =
824 		cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
825 			    CMDQ_CREATE_QP1_RQ_SGE_SFT);
826 
827 	req.pd_id = cpu_to_le32(qp->pd->id);
828 
829 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
830 					  (void *)&resp, NULL, 0);
831 	if (rc)
832 		goto fail;
833 
834 	qp->id = le32_to_cpu(resp.xid);
835 	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
836 	rcfw->qp_tbl[qp->id].qp_id = qp->id;
837 	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
838 
839 	return 0;
840 
841 fail:
842 	bnxt_qplib_free_qp_hdr_buf(res, qp);
843 fail_rq:
844 	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
845 	kfree(rq->swq);
846 fail_sq:
847 	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
848 	kfree(sq->swq);
849 exit:
850 	return rc;
851 }
852 
853 int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
854 {
855 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
856 	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
857 	struct cmdq_create_qp req;
858 	struct creq_create_qp_resp resp;
859 	struct bnxt_qplib_pbl *pbl;
860 	struct sq_psn_search **psn_search_ptr;
861 	unsigned long int psn_search, poff = 0;
862 	struct bnxt_qplib_q *sq = &qp->sq;
863 	struct bnxt_qplib_q *rq = &qp->rq;
864 	struct bnxt_qplib_hwq *xrrq;
865 	int i, rc, req_size, psn_sz;
866 	u16 cmd_flags = 0, max_ssge;
867 	u32 sw_prod, qp_flags = 0;
868 
869 	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
870 
871 	/* General */
872 	req.type = qp->type;
873 	req.dpi = cpu_to_le32(qp->dpi->dpi);
874 	req.qp_handle = cpu_to_le64(qp->qp_handle);
875 
876 	/* SQ */
877 	psn_sz = (qp->type == CMDQ_CREATE_QP_TYPE_RC) ?
878 		 sizeof(struct sq_psn_search) : 0;
879 	sq->hwq.max_elements = sq->max_wqe;
880 	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
881 				       sq->nmap, &sq->hwq.max_elements,
882 				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
883 				       psn_sz,
884 				       PAGE_SIZE, HWQ_TYPE_QUEUE);
885 	if (rc)
886 		goto exit;
887 
888 	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
889 	if (!sq->swq) {
890 		rc = -ENOMEM;
891 		goto fail_sq;
892 	}
893 	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
894 	if (psn_sz) {
895 		psn_search_ptr = (struct sq_psn_search **)
896 				  &hw_sq_send_ptr[get_sqe_pg
897 					(sq->hwq.max_elements)];
898 		psn_search = (unsigned long int)
899 			      &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
900 			      [get_sqe_idx(sq->hwq.max_elements)];
901 		if (psn_search & ~PAGE_MASK) {
902 			/* If the psn_search does not start on a page boundary,
903 			 * then calculate the offset
904 			 */
905 			poff = (psn_search & ~PAGE_MASK) /
906 				BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
907 		}
908 		for (i = 0; i < sq->hwq.max_elements; i++)
909 			sq->swq[i].psn_search =
910 				&psn_search_ptr[get_psne_pg(i + poff)]
911 					       [get_psne_idx(i + poff)];
912 	}
913 	pbl = &sq->hwq.pbl[PBL_LVL_0];
914 	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
915 	req.sq_pg_size_sq_lvl =
916 		((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
917 				 <<  CMDQ_CREATE_QP_SQ_LVL_SFT) |
918 		(pbl->pg_size == ROCE_PG_SIZE_4K ?
919 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
920 		 pbl->pg_size == ROCE_PG_SIZE_8K ?
921 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
922 		 pbl->pg_size == ROCE_PG_SIZE_64K ?
923 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
924 		 pbl->pg_size == ROCE_PG_SIZE_2M ?
925 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
926 		 pbl->pg_size == ROCE_PG_SIZE_8M ?
927 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
928 		 pbl->pg_size == ROCE_PG_SIZE_1G ?
929 				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
930 		 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
931 
932 	/* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
933 	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
934 	for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
935 		hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
936 						[get_sqe_idx(sw_prod)];
937 		hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
938 	}
939 
940 	if (qp->scq)
941 		req.scq_cid = cpu_to_le32(qp->scq->id);
942 
943 	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
944 	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
945 	if (qp->sig_type)
946 		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
947 
948 	/* RQ */
949 	if (rq->max_wqe) {
950 		rq->hwq.max_elements = rq->max_wqe;
951 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
952 					       rq->nmap, &rq->hwq.max_elements,
953 					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
954 					       PAGE_SIZE, HWQ_TYPE_QUEUE);
955 		if (rc)
956 			goto fail_sq;
957 
958 		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
959 				  GFP_KERNEL);
960 		if (!rq->swq) {
961 			rc = -ENOMEM;
962 			goto fail_rq;
963 		}
964 		pbl = &rq->hwq.pbl[PBL_LVL_0];
965 		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
966 		req.rq_pg_size_rq_lvl =
967 			((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
968 			 CMDQ_CREATE_QP_RQ_LVL_SFT) |
969 				(pbl->pg_size == ROCE_PG_SIZE_4K ?
970 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
971 				 pbl->pg_size == ROCE_PG_SIZE_8K ?
972 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
973 				 pbl->pg_size == ROCE_PG_SIZE_64K ?
974 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
975 				 pbl->pg_size == ROCE_PG_SIZE_2M ?
976 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
977 				 pbl->pg_size == ROCE_PG_SIZE_8M ?
978 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
979 				 pbl->pg_size == ROCE_PG_SIZE_1G ?
980 					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
981 				 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
982 	} else {
983 		/* SRQ */
984 		if (qp->srq) {
985 			qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
986 			req.srq_cid = cpu_to_le32(qp->srq->id);
987 		}
988 	}
989 
990 	if (qp->rcq)
991 		req.rcq_cid = cpu_to_le32(qp->rcq->id);
992 	req.qp_flags = cpu_to_le32(qp_flags);
993 	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
994 	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
995 	qp->sq_hdr_buf = NULL;
996 	qp->rq_hdr_buf = NULL;
997 
998 	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
999 	if (rc)
1000 		goto fail_rq;
1001 
1002 	/* CTRL-22434: Irrespective of the requested SGE count on the SQ
1003 	 * always create the QP with max send sges possible if the requested
1004 	 * inline size is greater than 0.
1005 	 */
1006 	max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
1007 	req.sq_fwo_sq_sge = cpu_to_le16(
1008 				((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
1009 				 << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
1010 	req.rq_fwo_rq_sge = cpu_to_le16(
1011 				((rq->max_sge & CMDQ_CREATE_QP_RQ_SGE_MASK)
1012 				 << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
1013 	/* ORRQ and IRRQ */
1014 	if (psn_sz) {
1015 		xrrq = &qp->orrq;
1016 		xrrq->max_elements =
1017 			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1018 		req_size = xrrq->max_elements *
1019 			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1020 		req_size &= ~(PAGE_SIZE - 1);
1021 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
1022 					       &xrrq->max_elements,
1023 					       BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
1024 					       0, req_size, HWQ_TYPE_CTX);
1025 		if (rc)
1026 			goto fail_buf_free;
1027 		pbl = &xrrq->pbl[PBL_LVL_0];
1028 		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1029 
1030 		xrrq = &qp->irrq;
1031 		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
1032 						qp->max_dest_rd_atomic);
1033 		req_size = xrrq->max_elements *
1034 			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1035 		req_size &= ~(PAGE_SIZE - 1);
1036 
1037 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
1038 					       &xrrq->max_elements,
1039 					       BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
1040 					       0, req_size, HWQ_TYPE_CTX);
1041 		if (rc)
1042 			goto fail_orrq;
1043 
1044 		pbl = &xrrq->pbl[PBL_LVL_0];
1045 		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1046 	}
1047 	req.pd_id = cpu_to_le32(qp->pd->id);
1048 
1049 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1050 					  (void *)&resp, NULL, 0);
1051 	if (rc)
1052 		goto fail;
1053 
1054 	qp->id = le32_to_cpu(resp.xid);
1055 	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
1056 	INIT_LIST_HEAD(&qp->sq_flush);
1057 	INIT_LIST_HEAD(&qp->rq_flush);
1058 	rcfw->qp_tbl[qp->id].qp_id = qp->id;
1059 	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
1060 
1061 	return 0;
1062 
1063 fail:
1064 	if (qp->irrq.max_elements)
1065 		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1066 fail_orrq:
1067 	if (qp->orrq.max_elements)
1068 		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1069 fail_buf_free:
1070 	bnxt_qplib_free_qp_hdr_buf(res, qp);
1071 fail_rq:
1072 	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
1073 	kfree(rq->swq);
1074 fail_sq:
1075 	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
1076 	kfree(sq->swq);
1077 exit:
1078 	return rc;
1079 }
1080 
1081 static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
1082 {
1083 	switch (qp->state) {
1084 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1085 		/* INIT->RTR, configure the path_mtu to the default
1086 		 * 2048 if not being requested
1087 		 */
1088 		if (!(qp->modify_flags &
1089 		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
1090 			qp->modify_flags |=
1091 				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1092 			qp->path_mtu =
1093 				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1094 		}
1095 		qp->modify_flags &=
1096 			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1097 		/* Bono FW require the max_dest_rd_atomic to be >= 1 */
1098 		if (qp->max_dest_rd_atomic < 1)
1099 			qp->max_dest_rd_atomic = 1;
1100 		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
1101 		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
1102 		if (!(qp->modify_flags &
1103 		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
1104 			qp->modify_flags |=
1105 				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
1106 			qp->ah.sgid_index = 0;
1107 		}
1108 		break;
1109 	default:
1110 		break;
1111 	}
1112 }
1113 
1114 static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
1115 {
1116 	switch (qp->state) {
1117 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1118 		/* Bono FW requires the max_rd_atomic to be >= 1 */
1119 		if (qp->max_rd_atomic < 1)
1120 			qp->max_rd_atomic = 1;
1121 		/* Bono FW does not allow PKEY_INDEX,
1122 		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
1123 		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
1124 		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
1125 		 * modification
1126 		 */
1127 		qp->modify_flags &=
1128 			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
1129 			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1130 			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1131 			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1132 			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1133 			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1134 			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1135 			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
1136 			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
1137 			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
1138 			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
1139 			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
1140 		break;
1141 	default:
1142 		break;
1143 	}
1144 }
1145 
1146 static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
1147 {
1148 	switch (qp->cur_qp_state) {
1149 	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1150 		break;
1151 	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1152 		__modify_flags_from_init_state(qp);
1153 		break;
1154 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1155 		__modify_flags_from_rtr_state(qp);
1156 		break;
1157 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1158 		break;
1159 	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1160 		break;
1161 	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1162 		break;
1163 	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1164 		break;
1165 	default:
1166 		break;
1167 	}
1168 }
1169 
1170 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1171 {
1172 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1173 	struct cmdq_modify_qp req;
1174 	struct creq_modify_qp_resp resp;
1175 	u16 cmd_flags = 0, pkey;
1176 	u32 temp32[4];
1177 	u32 bmask;
1178 	int rc;
1179 
1180 	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
1181 
1182 	/* Filter out the qp_attr_mask based on the state->new transition */
1183 	__filter_modify_flags(qp);
1184 	bmask = qp->modify_flags;
1185 	req.modify_mask = cpu_to_le32(qp->modify_flags);
1186 	req.qp_cid = cpu_to_le32(qp->id);
1187 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
1188 		req.network_type_en_sqd_async_notify_new_state =
1189 				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
1190 				(qp->en_sqd_async_notify ?
1191 					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
1192 	}
1193 	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
1194 
1195 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
1196 		req.access = qp->access;
1197 
1198 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
1199 		if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
1200 					 qp->pkey_index, &pkey))
1201 			req.pkey = cpu_to_le16(pkey);
1202 	}
1203 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
1204 		req.qkey = cpu_to_le32(qp->qkey);
1205 
1206 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
1207 		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
1208 		req.dgid[0] = cpu_to_le32(temp32[0]);
1209 		req.dgid[1] = cpu_to_le32(temp32[1]);
1210 		req.dgid[2] = cpu_to_le32(temp32[2]);
1211 		req.dgid[3] = cpu_to_le32(temp32[3]);
1212 	}
1213 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
1214 		req.flow_label = cpu_to_le32(qp->ah.flow_label);
1215 
1216 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
1217 		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
1218 					     [qp->ah.sgid_index]);
1219 
1220 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
1221 		req.hop_limit = qp->ah.hop_limit;
1222 
1223 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
1224 		req.traffic_class = qp->ah.traffic_class;
1225 
1226 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
1227 		memcpy(req.dest_mac, qp->ah.dmac, 6);
1228 
1229 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
1230 		req.path_mtu = qp->path_mtu;
1231 
1232 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
1233 		req.timeout = qp->timeout;
1234 
1235 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
1236 		req.retry_cnt = qp->retry_cnt;
1237 
1238 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
1239 		req.rnr_retry = qp->rnr_retry;
1240 
1241 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
1242 		req.min_rnr_timer = qp->min_rnr_timer;
1243 
1244 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
1245 		req.rq_psn = cpu_to_le32(qp->rq.psn);
1246 
1247 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
1248 		req.sq_psn = cpu_to_le32(qp->sq.psn);
1249 
1250 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
1251 		req.max_rd_atomic =
1252 			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1253 
1254 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
1255 		req.max_dest_rd_atomic =
1256 			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1257 
1258 	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1259 	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1260 	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1261 	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1262 	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1263 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1264 		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1265 
1266 	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1267 
1268 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1269 					  (void *)&resp, NULL, 0);
1270 	if (rc)
1271 		return rc;
1272 	qp->cur_qp_state = qp->state;
1273 	return 0;
1274 }
1275 
1276 int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1277 {
1278 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1279 	struct cmdq_query_qp req;
1280 	struct creq_query_qp_resp resp;
1281 	struct bnxt_qplib_rcfw_sbuf *sbuf;
1282 	struct creq_query_qp_resp_sb *sb;
1283 	u16 cmd_flags = 0;
1284 	u32 temp32[4];
1285 	int i, rc = 0;
1286 
1287 	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
1288 
1289 	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
1290 	if (!sbuf)
1291 		return -ENOMEM;
1292 	sb = sbuf->sb;
1293 
1294 	req.qp_cid = cpu_to_le32(qp->id);
1295 	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
1296 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
1297 					  (void *)sbuf, 0);
1298 	if (rc)
1299 		goto bail;
1300 	/* Extract the context from the side buffer */
1301 	qp->state = sb->en_sqd_async_notify_state &
1302 			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1303 	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1304 				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
1305 				  true : false;
1306 	qp->access = sb->access;
1307 	qp->pkey_index = le16_to_cpu(sb->pkey);
1308 	qp->qkey = le32_to_cpu(sb->qkey);
1309 
1310 	temp32[0] = le32_to_cpu(sb->dgid[0]);
1311 	temp32[1] = le32_to_cpu(sb->dgid[1]);
1312 	temp32[2] = le32_to_cpu(sb->dgid[2]);
1313 	temp32[3] = le32_to_cpu(sb->dgid[3]);
1314 	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1315 
1316 	qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1317 
1318 	qp->ah.sgid_index = 0;
1319 	for (i = 0; i < res->sgid_tbl.max; i++) {
1320 		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1321 			qp->ah.sgid_index = i;
1322 			break;
1323 		}
1324 	}
1325 	if (i == res->sgid_tbl.max)
1326 		dev_warn(&res->pdev->dev, "SGID not found??\n");
1327 
1328 	qp->ah.hop_limit = sb->hop_limit;
1329 	qp->ah.traffic_class = sb->traffic_class;
1330 	memcpy(qp->ah.dmac, sb->dest_mac, 6);
1331 	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1332 				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
1333 				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1334 	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1335 				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
1336 				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
1337 	qp->timeout = sb->timeout;
1338 	qp->retry_cnt = sb->retry_cnt;
1339 	qp->rnr_retry = sb->rnr_retry;
1340 	qp->min_rnr_timer = sb->min_rnr_timer;
1341 	qp->rq.psn = le32_to_cpu(sb->rq_psn);
1342 	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1343 	qp->sq.psn = le32_to_cpu(sb->sq_psn);
1344 	qp->max_dest_rd_atomic =
1345 			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1346 	qp->sq.max_wqe = qp->sq.hwq.max_elements;
1347 	qp->rq.max_wqe = qp->rq.hwq.max_elements;
1348 	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1349 	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1350 	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1351 	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1352 	memcpy(qp->smac, sb->src_mac, 6);
1353 	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1354 bail:
1355 	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
1356 	return rc;
1357 }
1358 
1359 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1360 {
1361 	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1362 	struct cq_base *hw_cqe, **hw_cqe_ptr;
1363 	int i;
1364 
1365 	for (i = 0; i < cq_hwq->max_elements; i++) {
1366 		hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
1367 		hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
1368 		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
1369 			continue;
1370 		/*
1371 		 * The valid test of the entry must be done first before
1372 		 * reading any further.
1373 		 */
1374 		dma_rmb();
1375 		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1376 		case CQ_BASE_CQE_TYPE_REQ:
1377 		case CQ_BASE_CQE_TYPE_TERMINAL:
1378 		{
1379 			struct cq_req *cqe = (struct cq_req *)hw_cqe;
1380 
1381 			if (qp == le64_to_cpu(cqe->qp_handle))
1382 				cqe->qp_handle = 0;
1383 			break;
1384 		}
1385 		case CQ_BASE_CQE_TYPE_RES_RC:
1386 		case CQ_BASE_CQE_TYPE_RES_UD:
1387 		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1388 		{
1389 			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1390 
1391 			if (qp == le64_to_cpu(cqe->qp_handle))
1392 				cqe->qp_handle = 0;
1393 			break;
1394 		}
1395 		default:
1396 			break;
1397 		}
1398 	}
1399 }
1400 
1401 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1402 			  struct bnxt_qplib_qp *qp)
1403 {
1404 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1405 	struct cmdq_destroy_qp req;
1406 	struct creq_destroy_qp_resp resp;
1407 	u16 cmd_flags = 0;
1408 	int rc;
1409 
1410 	rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1411 	rcfw->qp_tbl[qp->id].qp_handle = NULL;
1412 
1413 	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
1414 
1415 	req.qp_cid = cpu_to_le32(qp->id);
1416 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1417 					  (void *)&resp, NULL, 0);
1418 	if (rc) {
1419 		rcfw->qp_tbl[qp->id].qp_id = qp->id;
1420 		rcfw->qp_tbl[qp->id].qp_handle = qp;
1421 		return rc;
1422 	}
1423 
1424 	return 0;
1425 }
1426 
1427 void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
1428 			    struct bnxt_qplib_qp *qp)
1429 {
1430 	bnxt_qplib_free_qp_hdr_buf(res, qp);
1431 	bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
1432 	kfree(qp->sq.swq);
1433 
1434 	bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
1435 	kfree(qp->rq.swq);
1436 
1437 	if (qp->irrq.max_elements)
1438 		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1439 	if (qp->orrq.max_elements)
1440 		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1441 
1442 }
1443 
1444 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1445 				struct bnxt_qplib_sge *sge)
1446 {
1447 	struct bnxt_qplib_q *sq = &qp->sq;
1448 	u32 sw_prod;
1449 
1450 	memset(sge, 0, sizeof(*sge));
1451 
1452 	if (qp->sq_hdr_buf) {
1453 		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1454 		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1455 					 sw_prod * qp->sq_hdr_buf_size);
1456 		sge->lkey = 0xFFFFFFFF;
1457 		sge->size = qp->sq_hdr_buf_size;
1458 		return qp->sq_hdr_buf + sw_prod * sge->size;
1459 	}
1460 	return NULL;
1461 }
1462 
1463 u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1464 {
1465 	struct bnxt_qplib_q *rq = &qp->rq;
1466 
1467 	return HWQ_CMP(rq->hwq.prod, &rq->hwq);
1468 }
1469 
1470 dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1471 {
1472 	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1473 }
1474 
1475 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1476 				struct bnxt_qplib_sge *sge)
1477 {
1478 	struct bnxt_qplib_q *rq = &qp->rq;
1479 	u32 sw_prod;
1480 
1481 	memset(sge, 0, sizeof(*sge));
1482 
1483 	if (qp->rq_hdr_buf) {
1484 		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1485 		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1486 					 sw_prod * qp->rq_hdr_buf_size);
1487 		sge->lkey = 0xFFFFFFFF;
1488 		sge->size = qp->rq_hdr_buf_size;
1489 		return qp->rq_hdr_buf + sw_prod * sge->size;
1490 	}
1491 	return NULL;
1492 }
1493 
1494 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1495 {
1496 	struct bnxt_qplib_q *sq = &qp->sq;
1497 	struct dbr_dbr db_msg = { 0 };
1498 	u32 sw_prod;
1499 
1500 	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1501 
1502 	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
1503 				   DBR_DBR_INDEX_MASK);
1504 	db_msg.type_xid =
1505 		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1506 			    DBR_DBR_TYPE_SQ);
1507 	/* Flush all the WQE writes to HW */
1508 	wmb();
1509 	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1510 }
1511 
1512 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1513 			 struct bnxt_qplib_swqe *wqe)
1514 {
1515 	struct bnxt_qplib_q *sq = &qp->sq;
1516 	struct bnxt_qplib_swq *swq;
1517 	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
1518 	struct sq_sge *hw_sge;
1519 	struct bnxt_qplib_nq_work *nq_work = NULL;
1520 	bool sch_handler = false;
1521 	u32 sw_prod;
1522 	u8 wqe_size16;
1523 	int i, rc = 0, data_len = 0, pkt_num = 0;
1524 	__le32 temp32;
1525 
1526 	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
1527 		if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1528 			sch_handler = true;
1529 			dev_dbg(&sq->hwq.pdev->dev,
1530 				"%s Error QP. Scheduling for poll_cq\n",
1531 				__func__);
1532 			goto queue_err;
1533 		}
1534 	}
1535 
1536 	if (bnxt_qplib_queue_full(sq)) {
1537 		dev_err(&sq->hwq.pdev->dev,
1538 			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1539 			sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
1540 			sq->q_full_delta);
1541 		rc = -ENOMEM;
1542 		goto done;
1543 	}
1544 	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1545 	swq = &sq->swq[sw_prod];
1546 	swq->wr_id = wqe->wr_id;
1547 	swq->type = wqe->type;
1548 	swq->flags = wqe->flags;
1549 	if (qp->sig_type)
1550 		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1551 	swq->start_psn = sq->psn & BTH_PSN_MASK;
1552 
1553 	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
1554 	hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
1555 					[get_sqe_idx(sw_prod)];
1556 
1557 	memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
1558 
1559 	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1560 		/* Copy the inline data */
1561 		if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
1562 			dev_warn(&sq->hwq.pdev->dev,
1563 				 "Inline data length > 96 detected\n");
1564 			data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
1565 		} else {
1566 			data_len = wqe->inline_len;
1567 		}
1568 		memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
1569 		wqe_size16 = (data_len + 15) >> 4;
1570 	} else {
1571 		for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
1572 		     i < wqe->num_sge; i++, hw_sge++) {
1573 			hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1574 			hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1575 			hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1576 			data_len += wqe->sg_list[i].size;
1577 		}
1578 		/* Each SGE entry = 1 WQE size16 */
1579 		wqe_size16 = wqe->num_sge;
1580 		/* HW requires wqe size has room for atleast one SGE even if
1581 		 * none was supplied by ULP
1582 		 */
1583 		if (!wqe->num_sge)
1584 			wqe_size16++;
1585 	}
1586 
1587 	/* Specifics */
1588 	switch (wqe->type) {
1589 	case BNXT_QPLIB_SWQE_TYPE_SEND:
1590 		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1591 			/* Assemble info for Raw Ethertype QPs */
1592 			struct sq_send_raweth_qp1 *sqe =
1593 				(struct sq_send_raweth_qp1 *)hw_sq_send_hdr;
1594 
1595 			sqe->wqe_type = wqe->type;
1596 			sqe->flags = wqe->flags;
1597 			sqe->wqe_size = wqe_size16 +
1598 				((offsetof(typeof(*sqe), data) + 15) >> 4);
1599 			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1600 			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1601 			sqe->length = cpu_to_le32(data_len);
1602 			sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1603 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1604 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1605 
1606 			break;
1607 		}
1608 		/* fall thru */
1609 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1610 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1611 	{
1612 		struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;
1613 
1614 		sqe->wqe_type = wqe->type;
1615 		sqe->flags = wqe->flags;
1616 		sqe->wqe_size = wqe_size16 +
1617 				((offsetof(typeof(*sqe), data) + 15) >> 4);
1618 		sqe->inv_key_or_imm_data = cpu_to_le32(
1619 						wqe->send.inv_key);
1620 		if (qp->type == CMDQ_CREATE_QP_TYPE_UD) {
1621 			sqe->q_key = cpu_to_le32(wqe->send.q_key);
1622 			sqe->dst_qp = cpu_to_le32(
1623 					wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
1624 			sqe->length = cpu_to_le32(data_len);
1625 			sqe->avid = cpu_to_le32(wqe->send.avid &
1626 						SQ_SEND_AVID_MASK);
1627 			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1628 		} else {
1629 			sqe->length = cpu_to_le32(data_len);
1630 			sqe->dst_qp = 0;
1631 			sqe->avid = 0;
1632 			if (qp->mtu)
1633 				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1634 			if (!pkt_num)
1635 				pkt_num = 1;
1636 			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1637 		}
1638 		break;
1639 	}
1640 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1641 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1642 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1643 	{
1644 		struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;
1645 
1646 		sqe->wqe_type = wqe->type;
1647 		sqe->flags = wqe->flags;
1648 		sqe->wqe_size = wqe_size16 +
1649 				((offsetof(typeof(*sqe), data) + 15) >> 4);
1650 		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1651 		sqe->length = cpu_to_le32((u32)data_len);
1652 		sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1653 		sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1654 		if (qp->mtu)
1655 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1656 		if (!pkt_num)
1657 			pkt_num = 1;
1658 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1659 		break;
1660 	}
1661 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1662 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1663 	{
1664 		struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;
1665 
1666 		sqe->wqe_type = wqe->type;
1667 		sqe->flags = wqe->flags;
1668 		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1669 		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1670 		sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1671 		sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1672 		if (qp->mtu)
1673 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1674 		if (!pkt_num)
1675 			pkt_num = 1;
1676 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1677 		break;
1678 	}
1679 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1680 	{
1681 		struct sq_localinvalidate *sqe =
1682 				(struct sq_localinvalidate *)hw_sq_send_hdr;
1683 
1684 		sqe->wqe_type = wqe->type;
1685 		sqe->flags = wqe->flags;
1686 		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1687 
1688 		break;
1689 	}
1690 	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1691 	{
1692 		struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;
1693 
1694 		sqe->wqe_type = wqe->type;
1695 		sqe->flags = wqe->flags;
1696 		sqe->access_cntl = wqe->frmr.access_cntl |
1697 				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1698 		sqe->zero_based_page_size_log =
1699 			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1700 			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1701 			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1702 		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1703 		temp32 = cpu_to_le32(wqe->frmr.length);
1704 		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1705 		sqe->numlevels_pbl_page_size_log =
1706 			((wqe->frmr.pbl_pg_sz_log <<
1707 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1708 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1709 			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1710 					SQ_FR_PMR_NUMLEVELS_MASK);
1711 
1712 		for (i = 0; i < wqe->frmr.page_list_len; i++)
1713 			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1714 						wqe->frmr.page_list[i] |
1715 						PTU_PTE_VALID);
1716 		sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1717 		sqe->va = cpu_to_le64(wqe->frmr.va);
1718 
1719 		break;
1720 	}
1721 	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1722 	{
1723 		struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;
1724 
1725 		sqe->wqe_type = wqe->type;
1726 		sqe->flags = wqe->flags;
1727 		sqe->access_cntl = wqe->bind.access_cntl;
1728 		sqe->mw_type_zero_based = wqe->bind.mw_type |
1729 			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1730 		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1731 		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1732 		sqe->va = cpu_to_le64(wqe->bind.va);
1733 		temp32 = cpu_to_le32(wqe->bind.length);
1734 		memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
1735 		break;
1736 	}
1737 	default:
1738 		/* Bad wqe, return error */
1739 		rc = -EINVAL;
1740 		goto done;
1741 	}
1742 	swq->next_psn = sq->psn & BTH_PSN_MASK;
1743 	if (swq->psn_search) {
1744 		swq->psn_search->opcode_start_psn = cpu_to_le32(
1745 			((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1746 			 SQ_PSN_SEARCH_START_PSN_MASK) |
1747 			((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1748 			 SQ_PSN_SEARCH_OPCODE_MASK));
1749 		swq->psn_search->flags_next_psn = cpu_to_le32(
1750 			((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1751 			 SQ_PSN_SEARCH_NEXT_PSN_MASK));
1752 	}
1753 queue_err:
1754 	if (sch_handler) {
1755 		/* Store the ULP info in the software structures */
1756 		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1757 		swq = &sq->swq[sw_prod];
1758 		swq->wr_id = wqe->wr_id;
1759 		swq->type = wqe->type;
1760 		swq->flags = wqe->flags;
1761 		if (qp->sig_type)
1762 			swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1763 		swq->start_psn = sq->psn & BTH_PSN_MASK;
1764 	}
1765 	sq->hwq.prod++;
1766 	qp->wqe_cnt++;
1767 
1768 done:
1769 	if (sch_handler) {
1770 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1771 		if (nq_work) {
1772 			nq_work->cq = qp->scq;
1773 			nq_work->nq = qp->scq->nq;
1774 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1775 			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
1776 		} else {
1777 			dev_err(&sq->hwq.pdev->dev,
1778 				"FP: Failed to allocate SQ nq_work!\n");
1779 			rc = -ENOMEM;
1780 		}
1781 	}
1782 	return rc;
1783 }
1784 
1785 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
1786 {
1787 	struct bnxt_qplib_q *rq = &qp->rq;
1788 	struct dbr_dbr db_msg = { 0 };
1789 	u32 sw_prod;
1790 
1791 	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1792 	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
1793 				   DBR_DBR_INDEX_MASK);
1794 	db_msg.type_xid =
1795 		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1796 			    DBR_DBR_TYPE_RQ);
1797 
1798 	/* Flush the writes to HW Rx WQE before the ringing Rx DB */
1799 	wmb();
1800 	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1801 }
1802 
1803 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1804 			 struct bnxt_qplib_swqe *wqe)
1805 {
1806 	struct bnxt_qplib_q *rq = &qp->rq;
1807 	struct rq_wqe *rqe, **rqe_ptr;
1808 	struct sq_sge *hw_sge;
1809 	struct bnxt_qplib_nq_work *nq_work = NULL;
1810 	bool sch_handler = false;
1811 	u32 sw_prod;
1812 	int i, rc = 0;
1813 
1814 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1815 		sch_handler = true;
1816 		dev_dbg(&rq->hwq.pdev->dev,
1817 			"%s: Error QP. Scheduling for poll_cq\n", __func__);
1818 		goto queue_err;
1819 	}
1820 	if (bnxt_qplib_queue_full(rq)) {
1821 		dev_err(&rq->hwq.pdev->dev,
1822 			"FP: QP (0x%x) RQ is full!\n", qp->id);
1823 		rc = -EINVAL;
1824 		goto done;
1825 	}
1826 	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1827 	rq->swq[sw_prod].wr_id = wqe->wr_id;
1828 
1829 	rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
1830 	rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
1831 
1832 	memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1833 
1834 	/* Calculate wqe_size16 and data_len */
1835 	for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
1836 	     i < wqe->num_sge; i++, hw_sge++) {
1837 		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1838 		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1839 		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1840 	}
1841 	rqe->wqe_type = wqe->type;
1842 	rqe->flags = wqe->flags;
1843 	rqe->wqe_size = wqe->num_sge +
1844 			((offsetof(typeof(*rqe), data) + 15) >> 4);
1845 	/* HW requires wqe size has room for atleast one SGE even if none
1846 	 * was supplied by ULP
1847 	 */
1848 	if (!wqe->num_sge)
1849 		rqe->wqe_size++;
1850 
1851 	/* Supply the rqe->wr_id index to the wr_id_tbl for now */
1852 	rqe->wr_id[0] = cpu_to_le32(sw_prod);
1853 
1854 queue_err:
1855 	if (sch_handler) {
1856 		/* Store the ULP info in the software structures */
1857 		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1858 		rq->swq[sw_prod].wr_id = wqe->wr_id;
1859 	}
1860 
1861 	rq->hwq.prod++;
1862 	if (sch_handler) {
1863 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1864 		if (nq_work) {
1865 			nq_work->cq = qp->rcq;
1866 			nq_work->nq = qp->rcq->nq;
1867 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1868 			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
1869 		} else {
1870 			dev_err(&rq->hwq.pdev->dev,
1871 				"FP: Failed to allocate RQ nq_work!\n");
1872 			rc = -ENOMEM;
1873 		}
1874 	}
1875 done:
1876 	return rc;
1877 }
1878 
1879 /* CQ */
1880 
1881 /* Spinlock must be held */
1882 static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
1883 {
1884 	struct dbr_dbr db_msg = { 0 };
1885 
1886 	db_msg.type_xid =
1887 		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1888 			    DBR_DBR_TYPE_CQ_ARMENA);
1889 	/* Flush memory writes before enabling the CQ */
1890 	wmb();
1891 	__iowrite64_copy(cq->dbr_base, &db_msg, sizeof(db_msg) / sizeof(u64));
1892 }
1893 
1894 static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
1895 {
1896 	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1897 	struct dbr_dbr db_msg = { 0 };
1898 	u32 sw_cons;
1899 
1900 	/* Ring DB */
1901 	sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
1902 	db_msg.index = cpu_to_le32((sw_cons << DBR_DBR_INDEX_SFT) &
1903 				    DBR_DBR_INDEX_MASK);
1904 	db_msg.type_xid =
1905 		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1906 			    arm_type);
1907 	/* flush memory writes before arming the CQ */
1908 	wmb();
1909 	__iowrite64_copy(cq->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1910 }
1911 
1912 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1913 {
1914 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1915 	struct cmdq_create_cq req;
1916 	struct creq_create_cq_resp resp;
1917 	struct bnxt_qplib_pbl *pbl;
1918 	u16 cmd_flags = 0;
1919 	int rc;
1920 
1921 	cq->hwq.max_elements = cq->max_wqe;
1922 	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
1923 				       cq->nmap, &cq->hwq.max_elements,
1924 				       BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
1925 				       PAGE_SIZE, HWQ_TYPE_QUEUE);
1926 	if (rc)
1927 		goto exit;
1928 
1929 	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
1930 
1931 	if (!cq->dpi) {
1932 		dev_err(&rcfw->pdev->dev,
1933 			"FP: CREATE_CQ failed due to NULL DPI\n");
1934 		return -EINVAL;
1935 	}
1936 	req.dpi = cpu_to_le32(cq->dpi->dpi);
1937 	req.cq_handle = cpu_to_le64(cq->cq_handle);
1938 
1939 	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
1940 	pbl = &cq->hwq.pbl[PBL_LVL_0];
1941 	req.pg_size_lvl = cpu_to_le32(
1942 	    ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
1943 						CMDQ_CREATE_CQ_LVL_SFT) |
1944 	    (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
1945 	     pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
1946 	     pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
1947 	     pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
1948 	     pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
1949 	     pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
1950 	     CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
1951 
1952 	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1953 
1954 	req.cq_fco_cnq_id = cpu_to_le32(
1955 			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
1956 			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
1957 
1958 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1959 					  (void *)&resp, NULL, 0);
1960 	if (rc)
1961 		goto fail;
1962 
1963 	cq->id = le32_to_cpu(resp.xid);
1964 	cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
1965 	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
1966 	init_waitqueue_head(&cq->waitq);
1967 	INIT_LIST_HEAD(&cq->sqf_head);
1968 	INIT_LIST_HEAD(&cq->rqf_head);
1969 	spin_lock_init(&cq->compl_lock);
1970 	spin_lock_init(&cq->flush_lock);
1971 
1972 	bnxt_qplib_arm_cq_enable(cq);
1973 	return 0;
1974 
1975 fail:
1976 	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1977 exit:
1978 	return rc;
1979 }
1980 
1981 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1982 {
1983 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1984 	struct cmdq_destroy_cq req;
1985 	struct creq_destroy_cq_resp resp;
1986 	u16 cmd_flags = 0;
1987 	int rc;
1988 
1989 	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
1990 
1991 	req.cq_cid = cpu_to_le32(cq->id);
1992 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1993 					  (void *)&resp, NULL, 0);
1994 	if (rc)
1995 		return rc;
1996 	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1997 	return 0;
1998 }
1999 
2000 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2001 		      struct bnxt_qplib_cqe **pcqe, int *budget)
2002 {
2003 	u32 sw_prod, sw_cons;
2004 	struct bnxt_qplib_cqe *cqe;
2005 	int rc = 0;
2006 
2007 	/* Now complete all outstanding SQEs with FLUSHED_ERR */
2008 	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
2009 	cqe = *pcqe;
2010 	while (*budget) {
2011 		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2012 		if (sw_cons == sw_prod) {
2013 			break;
2014 		}
2015 		/* Skip the FENCE WQE completions */
2016 		if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
2017 			bnxt_qplib_cancel_phantom_processing(qp);
2018 			goto skip_compl;
2019 		}
2020 		memset(cqe, 0, sizeof(*cqe));
2021 		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2022 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2023 		cqe->qp_handle = (u64)(unsigned long)qp;
2024 		cqe->wr_id = sq->swq[sw_cons].wr_id;
2025 		cqe->src_qp = qp->id;
2026 		cqe->type = sq->swq[sw_cons].type;
2027 		cqe++;
2028 		(*budget)--;
2029 skip_compl:
2030 		sq->hwq.cons++;
2031 	}
2032 	*pcqe = cqe;
2033 	if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
2034 		/* Out of budget */
2035 		rc = -EAGAIN;
2036 
2037 	return rc;
2038 }
2039 
2040 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2041 		      struct bnxt_qplib_cqe **pcqe, int *budget)
2042 {
2043 	struct bnxt_qplib_cqe *cqe;
2044 	u32 sw_prod, sw_cons;
2045 	int rc = 0;
2046 	int opcode = 0;
2047 
2048 	switch (qp->type) {
2049 	case CMDQ_CREATE_QP1_TYPE_GSI:
2050 		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2051 		break;
2052 	case CMDQ_CREATE_QP_TYPE_RC:
2053 		opcode = CQ_BASE_CQE_TYPE_RES_RC;
2054 		break;
2055 	case CMDQ_CREATE_QP_TYPE_UD:
2056 		opcode = CQ_BASE_CQE_TYPE_RES_UD;
2057 		break;
2058 	}
2059 
2060 	/* Flush the rest of the RQ */
2061 	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
2062 	cqe = *pcqe;
2063 	while (*budget) {
2064 		sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
2065 		if (sw_cons == sw_prod)
2066 			break;
2067 		memset(cqe, 0, sizeof(*cqe));
2068 		cqe->status =
2069 		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2070 		cqe->opcode = opcode;
2071 		cqe->qp_handle = (unsigned long)qp;
2072 		cqe->wr_id = rq->swq[sw_cons].wr_id;
2073 		cqe++;
2074 		(*budget)--;
2075 		rq->hwq.cons++;
2076 	}
2077 	*pcqe = cqe;
2078 	if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
2079 		/* Out of budget */
2080 		rc = -EAGAIN;
2081 
2082 	return rc;
2083 }
2084 
2085 void bnxt_qplib_mark_qp_error(void *qp_handle)
2086 {
2087 	struct bnxt_qplib_qp *qp = qp_handle;
2088 
2089 	if (!qp)
2090 		return;
2091 
2092 	/* Must block new posting of SQ and RQ */
2093 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2094 	bnxt_qplib_cancel_phantom_processing(qp);
2095 }
2096 
2097 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2098  *       CQE is track from sw_cq_cons to max_element but valid only if VALID=1
2099  */
2100 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2101 		     u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
2102 {
2103 	struct bnxt_qplib_q *sq = &qp->sq;
2104 	struct bnxt_qplib_swq *swq;
2105 	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
2106 	struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
2107 	struct cq_req *peek_req_hwcqe;
2108 	struct bnxt_qplib_qp *peek_qp;
2109 	struct bnxt_qplib_q *peek_sq;
2110 	int i, rc = 0;
2111 
2112 	/* Normal mode */
2113 	/* Check for the psn_search marking before completing */
2114 	swq = &sq->swq[sw_sq_cons];
2115 	if (swq->psn_search &&
2116 	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2117 		/* Unmark */
2118 		swq->psn_search->flags_next_psn = cpu_to_le32
2119 			(le32_to_cpu(swq->psn_search->flags_next_psn)
2120 				     & ~0x80000000);
2121 		dev_dbg(&cq->hwq.pdev->dev,
2122 			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2123 			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
2124 		sq->condition = true;
2125 		sq->send_phantom = true;
2126 
2127 		/* TODO: Only ARM if the previous SQE is ARMALL */
2128 		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
2129 
2130 		rc = -EAGAIN;
2131 		goto out;
2132 	}
2133 	if (sq->condition) {
2134 		/* Peek at the completions */
2135 		peek_raw_cq_cons = cq->hwq.cons;
2136 		peek_sw_cq_cons = cq_cons;
2137 		i = cq->hwq.max_elements;
2138 		while (i--) {
2139 			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
2140 			peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2141 			peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
2142 						     [CQE_IDX(peek_sw_cq_cons)];
2143 			/* If the next hwcqe is VALID */
2144 			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
2145 					  cq->hwq.max_elements)) {
2146 			/*
2147 			 * The valid test of the entry must be done first before
2148 			 * reading any further.
2149 			 */
2150 				dma_rmb();
2151 				/* If the next hwcqe is a REQ */
2152 				if ((peek_hwcqe->cqe_type_toggle &
2153 				    CQ_BASE_CQE_TYPE_MASK) ==
2154 				    CQ_BASE_CQE_TYPE_REQ) {
2155 					peek_req_hwcqe = (struct cq_req *)
2156 							 peek_hwcqe;
2157 					peek_qp = (struct bnxt_qplib_qp *)
2158 						((unsigned long)
2159 						 le64_to_cpu
2160 						 (peek_req_hwcqe->qp_handle));
2161 					peek_sq = &peek_qp->sq;
2162 					peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
2163 						peek_req_hwcqe->sq_cons_idx) - 1
2164 						, &sq->hwq);
2165 					/* If the hwcqe's sq's wr_id matches */
2166 					if (peek_sq == sq &&
2167 					    sq->swq[peek_sq_cons_idx].wr_id ==
2168 					    BNXT_QPLIB_FENCE_WRID) {
2169 						/*
2170 						 *  Unbreak only if the phantom
2171 						 *  comes back
2172 						 */
2173 						dev_dbg(&cq->hwq.pdev->dev,
2174 							"FP: Got Phantom CQE\n");
2175 						sq->condition = false;
2176 						sq->single = true;
2177 						rc = 0;
2178 						goto out;
2179 					}
2180 				}
2181 				/* Valid but not the phantom, so keep looping */
2182 			} else {
2183 				/* Not valid yet, just exit and wait */
2184 				rc = -EINVAL;
2185 				goto out;
2186 			}
2187 			peek_sw_cq_cons++;
2188 			peek_raw_cq_cons++;
2189 		}
2190 		dev_err(&cq->hwq.pdev->dev,
2191 			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2192 			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
2193 		rc = -EINVAL;
2194 	}
2195 out:
2196 	return rc;
2197 }
2198 
2199 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2200 				     struct cq_req *hwcqe,
2201 				     struct bnxt_qplib_cqe **pcqe, int *budget,
2202 				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2203 {
2204 	struct bnxt_qplib_qp *qp;
2205 	struct bnxt_qplib_q *sq;
2206 	struct bnxt_qplib_cqe *cqe;
2207 	u32 sw_sq_cons, cqe_sq_cons;
2208 	struct bnxt_qplib_swq *swq;
2209 	int rc = 0;
2210 
2211 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2212 				      le64_to_cpu(hwcqe->qp_handle));
2213 	if (!qp) {
2214 		dev_err(&cq->hwq.pdev->dev,
2215 			"FP: Process Req qp is NULL\n");
2216 		return -EINVAL;
2217 	}
2218 	sq = &qp->sq;
2219 
2220 	cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
2221 	if (cqe_sq_cons > sq->hwq.max_elements) {
2222 		dev_err(&cq->hwq.pdev->dev,
2223 			"FP: CQ Process req reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
2224 			cqe_sq_cons, sq->hwq.max_elements);
2225 		return -EINVAL;
2226 	}
2227 
2228 	if (qp->sq.flushed) {
2229 		dev_dbg(&cq->hwq.pdev->dev,
2230 			"%s: QP in Flush QP = %p\n", __func__, qp);
2231 		goto done;
2232 	}
2233 	/* Require to walk the sq's swq to fabricate CQEs for all previously
2234 	 * signaled SWQEs due to CQE aggregation from the current sq cons
2235 	 * to the cqe_sq_cons
2236 	 */
2237 	cqe = *pcqe;
2238 	while (*budget) {
2239 		sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2240 		if (sw_sq_cons == cqe_sq_cons)
2241 			/* Done */
2242 			break;
2243 
2244 		swq = &sq->swq[sw_sq_cons];
2245 		memset(cqe, 0, sizeof(*cqe));
2246 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2247 		cqe->qp_handle = (u64)(unsigned long)qp;
2248 		cqe->src_qp = qp->id;
2249 		cqe->wr_id = swq->wr_id;
2250 		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2251 			goto skip;
2252 		cqe->type = swq->type;
2253 
2254 		/* For the last CQE, check for status.  For errors, regardless
2255 		 * of the request being signaled or not, it must complete with
2256 		 * the hwcqe error status
2257 		 */
2258 		if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
2259 		    hwcqe->status != CQ_REQ_STATUS_OK) {
2260 			cqe->status = hwcqe->status;
2261 			dev_err(&cq->hwq.pdev->dev,
2262 				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2263 				sw_sq_cons, cqe->wr_id, cqe->status);
2264 			cqe++;
2265 			(*budget)--;
2266 			bnxt_qplib_mark_qp_error(qp);
2267 			/* Add qp to flush list of the CQ */
2268 			bnxt_qplib_add_flush_qp(qp);
2269 		} else {
2270 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2271 				/* Before we complete, do WA 9060 */
2272 				if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
2273 					      cqe_sq_cons)) {
2274 					*lib_qp = qp;
2275 					goto out;
2276 				}
2277 				cqe->status = CQ_REQ_STATUS_OK;
2278 				cqe++;
2279 				(*budget)--;
2280 			}
2281 		}
2282 skip:
2283 		sq->hwq.cons++;
2284 		if (sq->single)
2285 			break;
2286 	}
2287 out:
2288 	*pcqe = cqe;
2289 	if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
2290 		/* Out of budget */
2291 		rc = -EAGAIN;
2292 		goto done;
2293 	}
2294 	/*
2295 	 * Back to normal completion mode only after it has completed all of
2296 	 * the WC for this CQE
2297 	 */
2298 	sq->single = false;
2299 done:
2300 	return rc;
2301 }
2302 
2303 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2304 {
2305 	spin_lock(&srq->hwq.lock);
2306 	srq->swq[srq->last_idx].next_idx = (int)tag;
2307 	srq->last_idx = (int)tag;
2308 	srq->swq[srq->last_idx].next_idx = -1;
2309 	srq->hwq.cons++; /* Support for SRQE counter */
2310 	spin_unlock(&srq->hwq.lock);
2311 }
2312 
2313 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2314 					struct cq_res_rc *hwcqe,
2315 					struct bnxt_qplib_cqe **pcqe,
2316 					int *budget)
2317 {
2318 	struct bnxt_qplib_qp *qp;
2319 	struct bnxt_qplib_q *rq;
2320 	struct bnxt_qplib_srq *srq;
2321 	struct bnxt_qplib_cqe *cqe;
2322 	u32 wr_id_idx;
2323 	int rc = 0;
2324 
2325 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2326 				      le64_to_cpu(hwcqe->qp_handle));
2327 	if (!qp) {
2328 		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2329 		return -EINVAL;
2330 	}
2331 	if (qp->rq.flushed) {
2332 		dev_dbg(&cq->hwq.pdev->dev,
2333 			"%s: QP in Flush QP = %p\n", __func__, qp);
2334 		goto done;
2335 	}
2336 
2337 	cqe = *pcqe;
2338 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2339 	cqe->length = le32_to_cpu(hwcqe->length);
2340 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2341 	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2342 	cqe->flags = le16_to_cpu(hwcqe->flags);
2343 	cqe->status = hwcqe->status;
2344 	cqe->qp_handle = (u64)(unsigned long)qp;
2345 
2346 	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2347 				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2348 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2349 		srq = qp->srq;
2350 		if (!srq)
2351 			return -EINVAL;
2352 		if (wr_id_idx >= srq->hwq.max_elements) {
2353 			dev_err(&cq->hwq.pdev->dev,
2354 				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2355 				wr_id_idx, srq->hwq.max_elements);
2356 			return -EINVAL;
2357 		}
2358 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2359 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2360 		cqe++;
2361 		(*budget)--;
2362 		*pcqe = cqe;
2363 	} else {
2364 		rq = &qp->rq;
2365 		if (wr_id_idx >= rq->hwq.max_elements) {
2366 			dev_err(&cq->hwq.pdev->dev,
2367 				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2368 				wr_id_idx, rq->hwq.max_elements);
2369 			return -EINVAL;
2370 		}
2371 		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2372 		cqe++;
2373 		(*budget)--;
2374 		rq->hwq.cons++;
2375 		*pcqe = cqe;
2376 
2377 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2378 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2379 			/* Add qp to flush list of the CQ */
2380 			bnxt_qplib_add_flush_qp(qp);
2381 		}
2382 	}
2383 
2384 done:
2385 	return rc;
2386 }
2387 
2388 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2389 					struct cq_res_ud *hwcqe,
2390 					struct bnxt_qplib_cqe **pcqe,
2391 					int *budget)
2392 {
2393 	struct bnxt_qplib_qp *qp;
2394 	struct bnxt_qplib_q *rq;
2395 	struct bnxt_qplib_srq *srq;
2396 	struct bnxt_qplib_cqe *cqe;
2397 	u32 wr_id_idx;
2398 	int rc = 0;
2399 
2400 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2401 				      le64_to_cpu(hwcqe->qp_handle));
2402 	if (!qp) {
2403 		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2404 		return -EINVAL;
2405 	}
2406 	if (qp->rq.flushed) {
2407 		dev_dbg(&cq->hwq.pdev->dev,
2408 			"%s: QP in Flush QP = %p\n", __func__, qp);
2409 		goto done;
2410 	}
2411 	cqe = *pcqe;
2412 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2413 	cqe->length = le32_to_cpu(hwcqe->length);
2414 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2415 	cqe->flags = le16_to_cpu(hwcqe->flags);
2416 	cqe->status = hwcqe->status;
2417 	cqe->qp_handle = (u64)(unsigned long)qp;
2418 	memcpy(cqe->smac, hwcqe->src_mac, 6);
2419 	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2420 				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2421 	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2422 				  ((le32_to_cpu(
2423 				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
2424 				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2425 
2426 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2427 		srq = qp->srq;
2428 		if (!srq)
2429 			return -EINVAL;
2430 
2431 		if (wr_id_idx >= srq->hwq.max_elements) {
2432 			dev_err(&cq->hwq.pdev->dev,
2433 				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2434 				wr_id_idx, srq->hwq.max_elements);
2435 			return -EINVAL;
2436 		}
2437 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2438 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2439 		cqe++;
2440 		(*budget)--;
2441 		*pcqe = cqe;
2442 	} else {
2443 		rq = &qp->rq;
2444 		if (wr_id_idx >= rq->hwq.max_elements) {
2445 			dev_err(&cq->hwq.pdev->dev,
2446 				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2447 				wr_id_idx, rq->hwq.max_elements);
2448 			return -EINVAL;
2449 		}
2450 
2451 		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2452 		cqe++;
2453 		(*budget)--;
2454 		rq->hwq.cons++;
2455 		*pcqe = cqe;
2456 
2457 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2458 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2459 			/* Add qp to flush list of the CQ */
2460 			bnxt_qplib_add_flush_qp(qp);
2461 		}
2462 	}
2463 done:
2464 	return rc;
2465 }
2466 
2467 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2468 {
2469 	struct cq_base *hw_cqe, **hw_cqe_ptr;
2470 	u32 sw_cons, raw_cons;
2471 	bool rc = true;
2472 
2473 	raw_cons = cq->hwq.cons;
2474 	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2475 	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2476 	hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2477 
2478 	 /* Check for Valid bit. If the CQE is valid, return false */
2479 	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2480 	return rc;
2481 }
2482 
2483 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2484 						struct cq_res_raweth_qp1 *hwcqe,
2485 						struct bnxt_qplib_cqe **pcqe,
2486 						int *budget)
2487 {
2488 	struct bnxt_qplib_qp *qp;
2489 	struct bnxt_qplib_q *rq;
2490 	struct bnxt_qplib_srq *srq;
2491 	struct bnxt_qplib_cqe *cqe;
2492 	u32 wr_id_idx;
2493 	int rc = 0;
2494 
2495 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2496 				      le64_to_cpu(hwcqe->qp_handle));
2497 	if (!qp) {
2498 		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2499 		return -EINVAL;
2500 	}
2501 	if (qp->rq.flushed) {
2502 		dev_dbg(&cq->hwq.pdev->dev,
2503 			"%s: QP in Flush QP = %p\n", __func__, qp);
2504 		goto done;
2505 	}
2506 	cqe = *pcqe;
2507 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2508 	cqe->flags = le16_to_cpu(hwcqe->flags);
2509 	cqe->qp_handle = (u64)(unsigned long)qp;
2510 
2511 	wr_id_idx =
2512 		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2513 				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2514 	cqe->src_qp = qp->id;
2515 	if (qp->id == 1 && !cqe->length) {
2516 		/* Add workaround for the length misdetection */
2517 		cqe->length = 296;
2518 	} else {
2519 		cqe->length = le16_to_cpu(hwcqe->length);
2520 	}
2521 	cqe->pkey_index = qp->pkey_index;
2522 	memcpy(cqe->smac, qp->smac, 6);
2523 
2524 	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2525 	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2526 	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2527 
2528 	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2529 		srq = qp->srq;
2530 		if (!srq) {
2531 			dev_err(&cq->hwq.pdev->dev,
2532 				"FP: SRQ used but not defined??\n");
2533 			return -EINVAL;
2534 		}
2535 		if (wr_id_idx >= srq->hwq.max_elements) {
2536 			dev_err(&cq->hwq.pdev->dev,
2537 				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2538 				wr_id_idx, srq->hwq.max_elements);
2539 			return -EINVAL;
2540 		}
2541 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2542 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2543 		cqe++;
2544 		(*budget)--;
2545 		*pcqe = cqe;
2546 	} else {
2547 		rq = &qp->rq;
2548 		if (wr_id_idx >= rq->hwq.max_elements) {
2549 			dev_err(&cq->hwq.pdev->dev,
2550 				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2551 				wr_id_idx, rq->hwq.max_elements);
2552 			return -EINVAL;
2553 		}
2554 		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2555 		cqe++;
2556 		(*budget)--;
2557 		rq->hwq.cons++;
2558 		*pcqe = cqe;
2559 
2560 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2561 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2562 			/* Add qp to flush list of the CQ */
2563 			bnxt_qplib_add_flush_qp(qp);
2564 		}
2565 	}
2566 
2567 done:
2568 	return rc;
2569 }
2570 
2571 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2572 					  struct cq_terminal *hwcqe,
2573 					  struct bnxt_qplib_cqe **pcqe,
2574 					  int *budget)
2575 {
2576 	struct bnxt_qplib_qp *qp;
2577 	struct bnxt_qplib_q *sq, *rq;
2578 	struct bnxt_qplib_cqe *cqe;
2579 	u32 sw_cons = 0, cqe_cons;
2580 	int rc = 0;
2581 
2582 	/* Check the Status */
2583 	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2584 		dev_warn(&cq->hwq.pdev->dev,
2585 			 "FP: CQ Process Terminal Error status = 0x%x\n",
2586 			 hwcqe->status);
2587 
2588 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2589 				      le64_to_cpu(hwcqe->qp_handle));
2590 	if (!qp) {
2591 		dev_err(&cq->hwq.pdev->dev,
2592 			"FP: CQ Process terminal qp is NULL\n");
2593 		return -EINVAL;
2594 	}
2595 
2596 	/* Must block new posting of SQ and RQ */
2597 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2598 
2599 	sq = &qp->sq;
2600 	rq = &qp->rq;
2601 
2602 	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2603 	if (cqe_cons == 0xFFFF)
2604 		goto do_rq;
2605 
2606 	if (cqe_cons > sq->hwq.max_elements) {
2607 		dev_err(&cq->hwq.pdev->dev,
2608 			"FP: CQ Process terminal reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
2609 			cqe_cons, sq->hwq.max_elements);
2610 		goto do_rq;
2611 	}
2612 
2613 	if (qp->sq.flushed) {
2614 		dev_dbg(&cq->hwq.pdev->dev,
2615 			"%s: QP in Flush QP = %p\n", __func__, qp);
2616 		goto sq_done;
2617 	}
2618 
2619 	/* Terminal CQE can also include aggregated successful CQEs prior.
2620 	 * So we must complete all CQEs from the current sq's cons to the
2621 	 * cq_cons with status OK
2622 	 */
2623 	cqe = *pcqe;
2624 	while (*budget) {
2625 		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2626 		if (sw_cons == cqe_cons)
2627 			break;
2628 		if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2629 			memset(cqe, 0, sizeof(*cqe));
2630 			cqe->status = CQ_REQ_STATUS_OK;
2631 			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2632 			cqe->qp_handle = (u64)(unsigned long)qp;
2633 			cqe->src_qp = qp->id;
2634 			cqe->wr_id = sq->swq[sw_cons].wr_id;
2635 			cqe->type = sq->swq[sw_cons].type;
2636 			cqe++;
2637 			(*budget)--;
2638 		}
2639 		sq->hwq.cons++;
2640 	}
2641 	*pcqe = cqe;
2642 	if (!(*budget) && sw_cons != cqe_cons) {
2643 		/* Out of budget */
2644 		rc = -EAGAIN;
2645 		goto sq_done;
2646 	}
2647 sq_done:
2648 	if (rc)
2649 		return rc;
2650 do_rq:
2651 	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2652 	if (cqe_cons == 0xFFFF) {
2653 		goto done;
2654 	} else if (cqe_cons > rq->hwq.max_elements) {
2655 		dev_err(&cq->hwq.pdev->dev,
2656 			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2657 			cqe_cons, rq->hwq.max_elements);
2658 		goto done;
2659 	}
2660 
2661 	if (qp->rq.flushed) {
2662 		dev_dbg(&cq->hwq.pdev->dev,
2663 			"%s: QP in Flush QP = %p\n", __func__, qp);
2664 		rc = 0;
2665 		goto done;
2666 	}
2667 
2668 	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2669 	 * from the current rq->cons to the rq->prod regardless what the
2670 	 * rq->cons the terminal CQE indicates
2671 	 */
2672 
2673 	/* Add qp to flush list of the CQ */
2674 	bnxt_qplib_add_flush_qp(qp);
2675 done:
2676 	return rc;
2677 }
2678 
2679 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2680 					struct cq_cutoff *hwcqe)
2681 {
2682 	/* Check the Status */
2683 	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2684 		dev_err(&cq->hwq.pdev->dev,
2685 			"FP: CQ Process Cutoff Error status = 0x%x\n",
2686 			hwcqe->status);
2687 		return -EINVAL;
2688 	}
2689 	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2690 	wake_up_interruptible(&cq->waitq);
2691 
2692 	return 0;
2693 }
2694 
2695 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2696 				  struct bnxt_qplib_cqe *cqe,
2697 				  int num_cqes)
2698 {
2699 	struct bnxt_qplib_qp *qp = NULL;
2700 	u32 budget = num_cqes;
2701 	unsigned long flags;
2702 
2703 	spin_lock_irqsave(&cq->flush_lock, flags);
2704 	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2705 		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2706 		__flush_sq(&qp->sq, qp, &cqe, &budget);
2707 	}
2708 
2709 	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2710 		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2711 		__flush_rq(&qp->rq, qp, &cqe, &budget);
2712 	}
2713 	spin_unlock_irqrestore(&cq->flush_lock, flags);
2714 
2715 	return num_cqes - budget;
2716 }
2717 
2718 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2719 		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
2720 {
2721 	struct cq_base *hw_cqe, **hw_cqe_ptr;
2722 	u32 sw_cons, raw_cons;
2723 	int budget, rc = 0;
2724 
2725 	raw_cons = cq->hwq.cons;
2726 	budget = num_cqes;
2727 
2728 	while (budget) {
2729 		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2730 		hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2731 		hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2732 
2733 		/* Check for Valid bit */
2734 		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2735 			break;
2736 
2737 		/*
2738 		 * The valid test of the entry must be done first before
2739 		 * reading any further.
2740 		 */
2741 		dma_rmb();
2742 		/* From the device's respective CQE format to qplib_wc*/
2743 		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
2744 		case CQ_BASE_CQE_TYPE_REQ:
2745 			rc = bnxt_qplib_cq_process_req(cq,
2746 						       (struct cq_req *)hw_cqe,
2747 						       &cqe, &budget,
2748 						       sw_cons, lib_qp);
2749 			break;
2750 		case CQ_BASE_CQE_TYPE_RES_RC:
2751 			rc = bnxt_qplib_cq_process_res_rc(cq,
2752 							  (struct cq_res_rc *)
2753 							  hw_cqe, &cqe,
2754 							  &budget);
2755 			break;
2756 		case CQ_BASE_CQE_TYPE_RES_UD:
2757 			rc = bnxt_qplib_cq_process_res_ud
2758 					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
2759 					 &budget);
2760 			break;
2761 		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2762 			rc = bnxt_qplib_cq_process_res_raweth_qp1
2763 					(cq, (struct cq_res_raweth_qp1 *)
2764 					 hw_cqe, &cqe, &budget);
2765 			break;
2766 		case CQ_BASE_CQE_TYPE_TERMINAL:
2767 			rc = bnxt_qplib_cq_process_terminal
2768 					(cq, (struct cq_terminal *)hw_cqe,
2769 					 &cqe, &budget);
2770 			break;
2771 		case CQ_BASE_CQE_TYPE_CUT_OFF:
2772 			bnxt_qplib_cq_process_cutoff
2773 					(cq, (struct cq_cutoff *)hw_cqe);
2774 			/* Done processing this CQ */
2775 			goto exit;
2776 		default:
2777 			dev_err(&cq->hwq.pdev->dev,
2778 				"process_cq unknown type 0x%lx\n",
2779 				hw_cqe->cqe_type_toggle &
2780 				CQ_BASE_CQE_TYPE_MASK);
2781 			rc = -EINVAL;
2782 			break;
2783 		}
2784 		if (rc < 0) {
2785 			if (rc == -EAGAIN)
2786 				break;
2787 			/* Error while processing the CQE, just skip to the
2788 			 * next one
2789 			 */
2790 			dev_err(&cq->hwq.pdev->dev,
2791 				"process_cqe error rc = 0x%x\n", rc);
2792 		}
2793 		raw_cons++;
2794 	}
2795 	if (cq->hwq.cons != raw_cons) {
2796 		cq->hwq.cons = raw_cons;
2797 		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
2798 	}
2799 exit:
2800 	return num_cqes - budget;
2801 }
2802 
2803 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2804 {
2805 	if (arm_type)
2806 		bnxt_qplib_arm_cq(cq, arm_type);
2807 	/* Using cq->arm_state variable to track whether to issue cq handler */
2808 	atomic_set(&cq->arm_state, 1);
2809 }
2810 
2811 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
2812 {
2813 	flush_workqueue(qp->scq->nq->cqn_wq);
2814 	if (qp->scq != qp->rcq)
2815 		flush_workqueue(qp->rcq->nq->cqn_wq);
2816 }
2817