1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: Fast Path Operators
37  */
38 
39 #define dev_fmt(fmt) "QPLIB: " fmt
40 
41 #include <linux/interrupt.h>
42 #include <linux/spinlock.h>
43 #include <linux/sched.h>
44 #include <linux/slab.h>
45 #include <linux/pci.h>
46 #include <linux/delay.h>
47 #include <linux/prefetch.h>
48 #include <linux/if_ether.h>
49 
50 #include "roce_hsi.h"
51 
52 #include "qplib_res.h"
53 #include "qplib_rcfw.h"
54 #include "qplib_sp.h"
55 #include "qplib_fp.h"
56 
57 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
58 
59 static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
60 {
61 	qp->sq.condition = false;
62 	qp->sq.send_phantom = false;
63 	qp->sq.single = false;
64 }
65 
66 /* Flush list */
67 static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
68 {
69 	struct bnxt_qplib_cq *scq, *rcq;
70 
71 	scq = qp->scq;
72 	rcq = qp->rcq;
73 
74 	if (!qp->sq.flushed) {
75 		dev_dbg(&scq->hwq.pdev->dev,
76 			"FP: Adding to SQ Flush list = %p\n", qp);
77 		bnxt_qplib_cancel_phantom_processing(qp);
78 		list_add_tail(&qp->sq_flush, &scq->sqf_head);
79 		qp->sq.flushed = true;
80 	}
81 	if (!qp->srq) {
82 		if (!qp->rq.flushed) {
83 			dev_dbg(&rcq->hwq.pdev->dev,
84 				"FP: Adding to RQ Flush list = %p\n", qp);
85 			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
86 			qp->rq.flushed = true;
87 		}
88 	}
89 }
90 
91 static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
92 				       unsigned long *flags)
93 	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
94 {
95 	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
96 	if (qp->scq == qp->rcq)
97 		__acquire(&qp->rcq->flush_lock);
98 	else
99 		spin_lock(&qp->rcq->flush_lock);
100 }
101 
102 static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
103 				       unsigned long *flags)
104 	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
105 {
106 	if (qp->scq == qp->rcq)
107 		__release(&qp->rcq->flush_lock);
108 	else
109 		spin_unlock(&qp->rcq->flush_lock);
110 	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
111 }
112 
113 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
114 {
115 	unsigned long flags;
116 
117 	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
118 	__bnxt_qplib_add_flush_qp(qp);
119 	bnxt_qplib_release_cq_flush_locks(qp, &flags);
120 }
121 
122 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
123 {
124 	if (qp->sq.flushed) {
125 		qp->sq.flushed = false;
126 		list_del(&qp->sq_flush);
127 	}
128 	if (!qp->srq) {
129 		if (qp->rq.flushed) {
130 			qp->rq.flushed = false;
131 			list_del(&qp->rq_flush);
132 		}
133 	}
134 }
135 
136 void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
137 {
138 	unsigned long flags;
139 
140 	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
141 	__clean_cq(qp->scq, (u64)(unsigned long)qp);
142 	qp->sq.hwq.prod = 0;
143 	qp->sq.hwq.cons = 0;
144 	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
145 	qp->rq.hwq.prod = 0;
146 	qp->rq.hwq.cons = 0;
147 
148 	__bnxt_qplib_del_flush_qp(qp);
149 	bnxt_qplib_release_cq_flush_locks(qp, &flags);
150 }
151 
152 static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
153 {
154 	struct bnxt_qplib_nq_work *nq_work =
155 			container_of(work, struct bnxt_qplib_nq_work, work);
156 
157 	struct bnxt_qplib_cq *cq = nq_work->cq;
158 	struct bnxt_qplib_nq *nq = nq_work->nq;
159 
160 	if (cq && nq) {
161 		spin_lock_bh(&cq->compl_lock);
162 		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
163 			dev_dbg(&nq->pdev->dev,
164 				"%s:Trigger cq  = %p event nq = %p\n",
165 				__func__, cq, nq);
166 			nq->cqn_handler(nq, cq);
167 		}
168 		spin_unlock_bh(&cq->compl_lock);
169 	}
170 	kfree(nq_work);
171 }
172 
173 static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
174 				       struct bnxt_qplib_qp *qp)
175 {
176 	struct bnxt_qplib_q *rq = &qp->rq;
177 	struct bnxt_qplib_q *sq = &qp->sq;
178 
179 	if (qp->rq_hdr_buf)
180 		dma_free_coherent(&res->pdev->dev,
181 				  rq->max_wqe * qp->rq_hdr_buf_size,
182 				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
183 	if (qp->sq_hdr_buf)
184 		dma_free_coherent(&res->pdev->dev,
185 				  sq->max_wqe * qp->sq_hdr_buf_size,
186 				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
187 	qp->rq_hdr_buf = NULL;
188 	qp->sq_hdr_buf = NULL;
189 	qp->rq_hdr_buf_map = 0;
190 	qp->sq_hdr_buf_map = 0;
191 	qp->sq_hdr_buf_size = 0;
192 	qp->rq_hdr_buf_size = 0;
193 }
194 
195 static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
196 				       struct bnxt_qplib_qp *qp)
197 {
198 	struct bnxt_qplib_q *rq = &qp->rq;
199 	struct bnxt_qplib_q *sq = &qp->sq;
200 	int rc = 0;
201 
202 	if (qp->sq_hdr_buf_size && sq->max_wqe) {
203 		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
204 					sq->max_wqe * qp->sq_hdr_buf_size,
205 					&qp->sq_hdr_buf_map, GFP_KERNEL);
206 		if (!qp->sq_hdr_buf) {
207 			rc = -ENOMEM;
208 			dev_err(&res->pdev->dev,
209 				"Failed to create sq_hdr_buf\n");
210 			goto fail;
211 		}
212 	}
213 
214 	if (qp->rq_hdr_buf_size && rq->max_wqe) {
215 		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
216 						    rq->max_wqe *
217 						    qp->rq_hdr_buf_size,
218 						    &qp->rq_hdr_buf_map,
219 						    GFP_KERNEL);
220 		if (!qp->rq_hdr_buf) {
221 			rc = -ENOMEM;
222 			dev_err(&res->pdev->dev,
223 				"Failed to create rq_hdr_buf\n");
224 			goto fail;
225 		}
226 	}
227 	return 0;
228 
229 fail:
230 	bnxt_qplib_free_qp_hdr_buf(res, qp);
231 	return rc;
232 }
233 
234 static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
235 {
236 	struct bnxt_qplib_hwq *hwq = &nq->hwq;
237 	struct nq_base *nqe, **nq_ptr;
238 	int budget = nq->budget;
239 	u32 sw_cons, raw_cons;
240 	uintptr_t q_handle;
241 	u16 type;
242 
243 	spin_lock_bh(&hwq->lock);
244 	/* Service the NQ until empty */
245 	raw_cons = hwq->cons;
246 	while (budget--) {
247 		sw_cons = HWQ_CMP(raw_cons, hwq);
248 		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
249 		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
250 		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
251 			break;
252 
253 		/*
254 		 * The valid test of the entry must be done first before
255 		 * reading any further.
256 		 */
257 		dma_rmb();
258 
259 		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
260 		switch (type) {
261 		case NQ_BASE_TYPE_CQ_NOTIFICATION:
262 		{
263 			struct nq_cn *nqcne = (struct nq_cn *)nqe;
264 
265 			q_handle = le32_to_cpu(nqcne->cq_handle_low);
266 			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
267 						     << 32;
268 			if ((unsigned long)cq == q_handle) {
269 				nqcne->cq_handle_low = 0;
270 				nqcne->cq_handle_high = 0;
271 				cq->cnq_events++;
272 			}
273 			break;
274 		}
275 		default:
276 			break;
277 		}
278 		raw_cons++;
279 	}
280 	spin_unlock_bh(&hwq->lock);
281 }
282 
283 /* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
284  * this CQ.
285  */
286 static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
287 {
288 	u32 retry_cnt = 100;
289 
290 	while (retry_cnt--) {
291 		if (cnq_events == cq->cnq_events)
292 			return;
293 		usleep_range(50, 100);
294 		clean_nq(cq->nq, cq);
295 	}
296 }
297 
298 static void bnxt_qplib_service_nq(struct tasklet_struct *t)
299 {
300 	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
301 	struct bnxt_qplib_hwq *hwq = &nq->hwq;
302 	int num_srqne_processed = 0;
303 	int num_cqne_processed = 0;
304 	struct bnxt_qplib_cq *cq;
305 	int budget = nq->budget;
306 	u32 sw_cons, raw_cons;
307 	struct nq_base *nqe;
308 	uintptr_t q_handle;
309 	u16 type;
310 
311 	spin_lock_bh(&hwq->lock);
312 	/* Service the NQ until empty */
313 	raw_cons = hwq->cons;
314 	while (budget--) {
315 		sw_cons = HWQ_CMP(raw_cons, hwq);
316 		nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
317 		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
318 			break;
319 
320 		/*
321 		 * The valid test of the entry must be done first before
322 		 * reading any further.
323 		 */
324 		dma_rmb();
325 
326 		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
327 		switch (type) {
328 		case NQ_BASE_TYPE_CQ_NOTIFICATION:
329 		{
330 			struct nq_cn *nqcne = (struct nq_cn *)nqe;
331 
332 			q_handle = le32_to_cpu(nqcne->cq_handle_low);
333 			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
334 						     << 32;
335 			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
336 			if (!cq)
337 				break;
338 			bnxt_qplib_armen_db(&cq->dbinfo,
339 					    DBC_DBC_TYPE_CQ_ARMENA);
340 			spin_lock_bh(&cq->compl_lock);
341 			atomic_set(&cq->arm_state, 0);
342 			if (!nq->cqn_handler(nq, (cq)))
343 				num_cqne_processed++;
344 			else
345 				dev_warn(&nq->pdev->dev,
346 					 "cqn - type 0x%x not handled\n", type);
347 			cq->cnq_events++;
348 			spin_unlock_bh(&cq->compl_lock);
349 			break;
350 		}
351 		case NQ_BASE_TYPE_SRQ_EVENT:
352 		{
353 			struct bnxt_qplib_srq *srq;
354 			struct nq_srq_event *nqsrqe =
355 						(struct nq_srq_event *)nqe;
356 
357 			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
358 			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
359 				     << 32;
360 			srq = (struct bnxt_qplib_srq *)q_handle;
361 			bnxt_qplib_armen_db(&srq->dbinfo,
362 					    DBC_DBC_TYPE_SRQ_ARMENA);
363 			if (!nq->srqn_handler(nq,
364 					      (struct bnxt_qplib_srq *)q_handle,
365 					      nqsrqe->event))
366 				num_srqne_processed++;
367 			else
368 				dev_warn(&nq->pdev->dev,
369 					 "SRQ event 0x%x not handled\n",
370 					 nqsrqe->event);
371 			break;
372 		}
373 		case NQ_BASE_TYPE_DBQ_EVENT:
374 			break;
375 		default:
376 			dev_warn(&nq->pdev->dev,
377 				 "nqe with type = 0x%x not handled\n", type);
378 			break;
379 		}
380 		raw_cons++;
381 	}
382 	if (hwq->cons != raw_cons) {
383 		hwq->cons = raw_cons;
384 		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
385 	}
386 	spin_unlock_bh(&hwq->lock);
387 }
388 
389 static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
390 {
391 	struct bnxt_qplib_nq *nq = dev_instance;
392 	struct bnxt_qplib_hwq *hwq = &nq->hwq;
393 	u32 sw_cons;
394 
395 	/* Prefetch the NQ element */
396 	sw_cons = HWQ_CMP(hwq->cons, hwq);
397 	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
398 
399 	/* Fan out to CPU affinitized kthreads? */
400 	tasklet_schedule(&nq->nq_tasklet);
401 
402 	return IRQ_HANDLED;
403 }
404 
405 void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
406 {
407 	tasklet_disable(&nq->nq_tasklet);
408 	/* Mask h/w interrupt */
409 	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
410 	/* Sync with last running IRQ handler */
411 	synchronize_irq(nq->msix_vec);
412 	if (kill)
413 		tasklet_kill(&nq->nq_tasklet);
414 	if (nq->requested) {
415 		irq_set_affinity_hint(nq->msix_vec, NULL);
416 		free_irq(nq->msix_vec, nq);
417 		nq->requested = false;
418 	}
419 }
420 
421 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
422 {
423 	if (nq->cqn_wq) {
424 		destroy_workqueue(nq->cqn_wq);
425 		nq->cqn_wq = NULL;
426 	}
427 
428 	/* Make sure the HW is stopped! */
429 	bnxt_qplib_nq_stop_irq(nq, true);
430 
431 	if (nq->nq_db.reg.bar_reg) {
432 		iounmap(nq->nq_db.reg.bar_reg);
433 		nq->nq_db.reg.bar_reg = NULL;
434 	}
435 
436 	nq->cqn_handler = NULL;
437 	nq->srqn_handler = NULL;
438 	nq->msix_vec = 0;
439 }
440 
441 int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
442 			    int msix_vector, bool need_init)
443 {
444 	int rc;
445 
446 	if (nq->requested)
447 		return -EFAULT;
448 
449 	nq->msix_vec = msix_vector;
450 	if (need_init)
451 		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
452 	else
453 		tasklet_enable(&nq->nq_tasklet);
454 
455 	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
456 	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
457 	if (rc)
458 		return rc;
459 
460 	cpumask_clear(&nq->mask);
461 	cpumask_set_cpu(nq_indx, &nq->mask);
462 	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
463 	if (rc) {
464 		dev_warn(&nq->pdev->dev,
465 			 "set affinity failed; vector: %d nq_idx: %d\n",
466 			 nq->msix_vec, nq_indx);
467 	}
468 	nq->requested = true;
469 	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
470 
471 	return rc;
472 }
473 
474 static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq,  u32 reg_offt)
475 {
476 	resource_size_t reg_base;
477 	struct bnxt_qplib_nq_db *nq_db;
478 	struct pci_dev *pdev;
479 	int rc = 0;
480 
481 	pdev = nq->pdev;
482 	nq_db = &nq->nq_db;
483 
484 	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
485 	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
486 	if (!nq_db->reg.bar_base) {
487 		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
488 			nq_db->reg.bar_id);
489 		rc = -ENOMEM;
490 		goto fail;
491 	}
492 
493 	reg_base = nq_db->reg.bar_base + reg_offt;
494 	/* Unconditionally map 8 bytes to support 57500 series */
495 	nq_db->reg.len = 8;
496 	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
497 	if (!nq_db->reg.bar_reg) {
498 		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
499 			nq_db->reg.bar_id);
500 		rc = -ENOMEM;
501 		goto fail;
502 	}
503 
504 	nq_db->dbinfo.db = nq_db->reg.bar_reg;
505 	nq_db->dbinfo.hwq = &nq->hwq;
506 	nq_db->dbinfo.xid = nq->ring_id;
507 fail:
508 	return rc;
509 }
510 
511 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
512 			 int nq_idx, int msix_vector, int bar_reg_offset,
513 			 cqn_handler_t cqn_handler,
514 			 srqn_handler_t srqn_handler)
515 {
516 	int rc = -1;
517 
518 	nq->pdev = pdev;
519 	nq->cqn_handler = cqn_handler;
520 	nq->srqn_handler = srqn_handler;
521 
522 	/* Have a task to schedule CQ notifiers in post send case */
523 	nq->cqn_wq  = create_singlethread_workqueue("bnxt_qplib_nq");
524 	if (!nq->cqn_wq)
525 		return -ENOMEM;
526 
527 	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
528 	if (rc)
529 		goto fail;
530 
531 	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
532 	if (rc) {
533 		dev_err(&nq->pdev->dev,
534 			"Failed to request irq for nq-idx %d\n", nq_idx);
535 		goto fail;
536 	}
537 
538 	return 0;
539 fail:
540 	bnxt_qplib_disable_nq(nq);
541 	return rc;
542 }
543 
544 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
545 {
546 	if (nq->hwq.max_elements) {
547 		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
548 		nq->hwq.max_elements = 0;
549 	}
550 }
551 
552 int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
553 {
554 	struct bnxt_qplib_hwq_attr hwq_attr = {};
555 	struct bnxt_qplib_sg_info sginfo = {};
556 
557 	nq->pdev = res->pdev;
558 	nq->res = res;
559 	if (!nq->hwq.max_elements ||
560 	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
561 		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
562 
563 	sginfo.pgsize = PAGE_SIZE;
564 	sginfo.pgshft = PAGE_SHIFT;
565 	hwq_attr.res = res;
566 	hwq_attr.sginfo = &sginfo;
567 	hwq_attr.depth = nq->hwq.max_elements;
568 	hwq_attr.stride = sizeof(struct nq_base);
569 	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
570 	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
571 		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
572 		return -ENOMEM;
573 	}
574 	nq->budget = 8;
575 	return 0;
576 }
577 
578 /* SRQ */
579 void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
580 			   struct bnxt_qplib_srq *srq)
581 {
582 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
583 	struct cmdq_destroy_srq req;
584 	struct creq_destroy_srq_resp resp;
585 	u16 cmd_flags = 0;
586 	int rc;
587 
588 	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);
589 
590 	/* Configure the request */
591 	req.srq_cid = cpu_to_le32(srq->id);
592 
593 	rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
594 					  (struct creq_base *)&resp, NULL, 0);
595 	kfree(srq->swq);
596 	if (rc)
597 		return;
598 	bnxt_qplib_free_hwq(res, &srq->hwq);
599 }
600 
601 int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
602 			  struct bnxt_qplib_srq *srq)
603 {
604 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
605 	struct bnxt_qplib_hwq_attr hwq_attr = {};
606 	struct creq_create_srq_resp resp;
607 	struct cmdq_create_srq req;
608 	struct bnxt_qplib_pbl *pbl;
609 	u16 cmd_flags = 0;
610 	u16 pg_sz_lvl;
611 	int rc, idx;
612 
613 	hwq_attr.res = res;
614 	hwq_attr.sginfo = &srq->sg_info;
615 	hwq_attr.depth = srq->max_wqe;
616 	hwq_attr.stride = srq->wqe_size;
617 	hwq_attr.type = HWQ_TYPE_QUEUE;
618 	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
619 	if (rc)
620 		goto exit;
621 
622 	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
623 			   GFP_KERNEL);
624 	if (!srq->swq) {
625 		rc = -ENOMEM;
626 		goto fail;
627 	}
628 
629 	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);
630 
631 	/* Configure the request */
632 	req.dpi = cpu_to_le32(srq->dpi->dpi);
633 	req.srq_handle = cpu_to_le64((uintptr_t)srq);
634 
635 	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
636 	pbl = &srq->hwq.pbl[PBL_LVL_0];
637 	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
638 		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
639 	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
640 		      CMDQ_CREATE_SRQ_LVL_SFT;
641 	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
642 	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
643 	req.pd_id = cpu_to_le32(srq->pd->id);
644 	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
645 
646 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
647 					  (void *)&resp, NULL, 0);
648 	if (rc)
649 		goto fail;
650 
651 	spin_lock_init(&srq->lock);
652 	srq->start_idx = 0;
653 	srq->last_idx = srq->hwq.max_elements - 1;
654 	for (idx = 0; idx < srq->hwq.max_elements; idx++)
655 		srq->swq[idx].next_idx = idx + 1;
656 	srq->swq[srq->last_idx].next_idx = -1;
657 
658 	srq->id = le32_to_cpu(resp.xid);
659 	srq->dbinfo.hwq = &srq->hwq;
660 	srq->dbinfo.xid = srq->id;
661 	srq->dbinfo.db = srq->dpi->dbr;
662 	srq->dbinfo.max_slot = 1;
663 	srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
664 	if (srq->threshold)
665 		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
666 	srq->arm_req = false;
667 
668 	return 0;
669 fail:
670 	bnxt_qplib_free_hwq(res, &srq->hwq);
671 	kfree(srq->swq);
672 exit:
673 	return rc;
674 }
675 
676 int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
677 			  struct bnxt_qplib_srq *srq)
678 {
679 	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
680 	u32 sw_prod, sw_cons, count = 0;
681 
682 	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
683 	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
684 
685 	count = sw_prod > sw_cons ? sw_prod - sw_cons :
686 				    srq_hwq->max_elements - sw_cons + sw_prod;
687 	if (count > srq->threshold) {
688 		srq->arm_req = false;
689 		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
690 	} else {
691 		/* Deferred arming */
692 		srq->arm_req = true;
693 	}
694 
695 	return 0;
696 }
697 
698 int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
699 			 struct bnxt_qplib_srq *srq)
700 {
701 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
702 	struct cmdq_query_srq req;
703 	struct creq_query_srq_resp resp;
704 	struct bnxt_qplib_rcfw_sbuf *sbuf;
705 	struct creq_query_srq_resp_sb *sb;
706 	u16 cmd_flags = 0;
707 	int rc = 0;
708 
709 	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
710 
711 	/* Configure the request */
712 	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
713 	if (!sbuf)
714 		return -ENOMEM;
715 	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
716 	req.srq_cid = cpu_to_le32(srq->id);
717 	sb = sbuf->sb;
718 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
719 					  (void *)sbuf, 0);
720 	srq->threshold = le16_to_cpu(sb->srq_limit);
721 	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
722 
723 	return rc;
724 }
725 
726 int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
727 			     struct bnxt_qplib_swqe *wqe)
728 {
729 	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
730 	struct rq_wqe *srqe;
731 	struct sq_sge *hw_sge;
732 	u32 sw_prod, sw_cons, count = 0;
733 	int i, rc = 0, next;
734 
735 	spin_lock(&srq_hwq->lock);
736 	if (srq->start_idx == srq->last_idx) {
737 		dev_err(&srq_hwq->pdev->dev,
738 			"FP: SRQ (0x%x) is full!\n", srq->id);
739 		rc = -EINVAL;
740 		spin_unlock(&srq_hwq->lock);
741 		goto done;
742 	}
743 	next = srq->start_idx;
744 	srq->start_idx = srq->swq[next].next_idx;
745 	spin_unlock(&srq_hwq->lock);
746 
747 	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
748 	srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
749 	memset(srqe, 0, srq->wqe_size);
750 	/* Calculate wqe_size16 and data_len */
751 	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
752 	     i < wqe->num_sge; i++, hw_sge++) {
753 		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
754 		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
755 		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
756 	}
757 	srqe->wqe_type = wqe->type;
758 	srqe->flags = wqe->flags;
759 	srqe->wqe_size = wqe->num_sge +
760 			((offsetof(typeof(*srqe), data) + 15) >> 4);
761 	srqe->wr_id[0] = cpu_to_le32((u32)next);
762 	srq->swq[next].wr_id = wqe->wr_id;
763 
764 	srq_hwq->prod++;
765 
766 	spin_lock(&srq_hwq->lock);
767 	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
768 	/* retaining srq_hwq->cons for this logic
769 	 * actually the lock is only required to
770 	 * read srq_hwq->cons.
771 	 */
772 	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
773 	count = sw_prod > sw_cons ? sw_prod - sw_cons :
774 				    srq_hwq->max_elements - sw_cons + sw_prod;
775 	spin_unlock(&srq_hwq->lock);
776 	/* Ring DB */
777 	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
778 	if (srq->arm_req == true && count > srq->threshold) {
779 		srq->arm_req = false;
780 		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
781 	}
782 done:
783 	return rc;
784 }
785 
786 /* QP */
787 
788 static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
789 {
790 	int rc = 0;
791 	int indx;
792 
793 	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
794 	if (!que->swq) {
795 		rc = -ENOMEM;
796 		goto out;
797 	}
798 
799 	que->swq_start = 0;
800 	que->swq_last = que->max_wqe - 1;
801 	for (indx = 0; indx < que->max_wqe; indx++)
802 		que->swq[indx].next_idx = indx + 1;
803 	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
804 	que->swq_last = 0;
805 out:
806 	return rc;
807 }
808 
809 int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
810 {
811 	struct bnxt_qplib_hwq_attr hwq_attr = {};
812 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
813 	struct bnxt_qplib_q *sq = &qp->sq;
814 	struct bnxt_qplib_q *rq = &qp->rq;
815 	struct creq_create_qp1_resp resp;
816 	struct cmdq_create_qp1 req;
817 	struct bnxt_qplib_pbl *pbl;
818 	u16 cmd_flags = 0;
819 	u32 qp_flags = 0;
820 	u8 pg_sz_lvl;
821 	u32 tbl_indx;
822 	int rc;
823 
824 	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
825 
826 	/* General */
827 	req.type = qp->type;
828 	req.dpi = cpu_to_le32(qp->dpi->dpi);
829 	req.qp_handle = cpu_to_le64(qp->qp_handle);
830 
831 	/* SQ */
832 	hwq_attr.res = res;
833 	hwq_attr.sginfo = &sq->sg_info;
834 	hwq_attr.stride = sizeof(struct sq_sge);
835 	hwq_attr.depth = bnxt_qplib_get_depth(sq);
836 	hwq_attr.type = HWQ_TYPE_QUEUE;
837 	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
838 	if (rc)
839 		goto exit;
840 
841 	rc = bnxt_qplib_alloc_init_swq(sq);
842 	if (rc)
843 		goto fail_sq;
844 
845 	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
846 	pbl = &sq->hwq.pbl[PBL_LVL_0];
847 	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
848 	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
849 		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
850 	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
851 	req.sq_pg_size_sq_lvl = pg_sz_lvl;
852 	req.sq_fwo_sq_sge =
853 		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
854 			     CMDQ_CREATE_QP1_SQ_SGE_SFT);
855 	req.scq_cid = cpu_to_le32(qp->scq->id);
856 
857 	/* RQ */
858 	if (rq->max_wqe) {
859 		hwq_attr.res = res;
860 		hwq_attr.sginfo = &rq->sg_info;
861 		hwq_attr.stride = sizeof(struct sq_sge);
862 		hwq_attr.depth = bnxt_qplib_get_depth(rq);
863 		hwq_attr.type = HWQ_TYPE_QUEUE;
864 		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
865 		if (rc)
866 			goto sq_swq;
867 		rc = bnxt_qplib_alloc_init_swq(rq);
868 		if (rc)
869 			goto fail_rq;
870 		req.rq_size = cpu_to_le32(rq->max_wqe);
871 		pbl = &rq->hwq.pbl[PBL_LVL_0];
872 		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
873 		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
874 			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
875 		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
876 		req.rq_pg_size_rq_lvl = pg_sz_lvl;
877 		req.rq_fwo_rq_sge =
878 			cpu_to_le16((rq->max_sge &
879 				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
880 				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
881 	}
882 	req.rcq_cid = cpu_to_le32(qp->rcq->id);
883 	/* Header buffer - allow hdr_buf pass in */
884 	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
885 	if (rc) {
886 		rc = -ENOMEM;
887 		goto rq_rwq;
888 	}
889 	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
890 	req.qp_flags = cpu_to_le32(qp_flags);
891 	req.pd_id = cpu_to_le32(qp->pd->id);
892 
893 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
894 					  (void *)&resp, NULL, 0);
895 	if (rc)
896 		goto fail;
897 
898 	qp->id = le32_to_cpu(resp.xid);
899 	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
900 	qp->cctx = res->cctx;
901 	sq->dbinfo.hwq = &sq->hwq;
902 	sq->dbinfo.xid = qp->id;
903 	sq->dbinfo.db = qp->dpi->dbr;
904 	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
905 	if (rq->max_wqe) {
906 		rq->dbinfo.hwq = &rq->hwq;
907 		rq->dbinfo.xid = qp->id;
908 		rq->dbinfo.db = qp->dpi->dbr;
909 		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
910 	}
911 	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
912 	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
913 	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
914 
915 	return 0;
916 
917 fail:
918 	bnxt_qplib_free_qp_hdr_buf(res, qp);
919 rq_rwq:
920 	kfree(rq->swq);
921 fail_rq:
922 	bnxt_qplib_free_hwq(res, &rq->hwq);
923 sq_swq:
924 	kfree(sq->swq);
925 fail_sq:
926 	bnxt_qplib_free_hwq(res, &sq->hwq);
927 exit:
928 	return rc;
929 }
930 
931 static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
932 {
933 	struct bnxt_qplib_hwq *hwq;
934 	struct bnxt_qplib_q *sq;
935 	u64 fpsne, psn_pg;
936 	u16 indx_pad = 0;
937 
938 	sq = &qp->sq;
939 	hwq = &sq->hwq;
940 	/* First psn entry */
941 	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
942 	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
943 		indx_pad = (fpsne & ~PAGE_MASK) / size;
944 	hwq->pad_pgofft = indx_pad;
945 	hwq->pad_pg = (u64 *)psn_pg;
946 	hwq->pad_stride = size;
947 }
948 
949 int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
950 {
951 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
952 	struct bnxt_qplib_hwq_attr hwq_attr = {};
953 	struct bnxt_qplib_sg_info sginfo = {};
954 	struct bnxt_qplib_q *sq = &qp->sq;
955 	struct bnxt_qplib_q *rq = &qp->rq;
956 	struct creq_create_qp_resp resp;
957 	int rc, req_size, psn_sz = 0;
958 	struct bnxt_qplib_hwq *xrrq;
959 	struct bnxt_qplib_pbl *pbl;
960 	struct cmdq_create_qp req;
961 	u16 cmd_flags = 0;
962 	u32 qp_flags = 0;
963 	u8 pg_sz_lvl;
964 	u32 tbl_indx;
965 	u16 nsge;
966 
967 	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
968 
969 	/* General */
970 	req.type = qp->type;
971 	req.dpi = cpu_to_le32(qp->dpi->dpi);
972 	req.qp_handle = cpu_to_le64(qp->qp_handle);
973 
974 	/* SQ */
975 	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
976 		psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
977 			 sizeof(struct sq_psn_search_ext) :
978 			 sizeof(struct sq_psn_search);
979 	}
980 
981 	hwq_attr.res = res;
982 	hwq_attr.sginfo = &sq->sg_info;
983 	hwq_attr.stride = sizeof(struct sq_sge);
984 	hwq_attr.depth = bnxt_qplib_get_depth(sq);
985 	hwq_attr.aux_stride = psn_sz;
986 	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
987 	hwq_attr.type = HWQ_TYPE_QUEUE;
988 	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
989 	if (rc)
990 		goto exit;
991 
992 	rc = bnxt_qplib_alloc_init_swq(sq);
993 	if (rc)
994 		goto fail_sq;
995 
996 	if (psn_sz)
997 		bnxt_qplib_init_psn_ptr(qp, psn_sz);
998 
999 	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
1000 	pbl = &sq->hwq.pbl[PBL_LVL_0];
1001 	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1002 	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
1003 		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
1004 	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
1005 	req.sq_pg_size_sq_lvl = pg_sz_lvl;
1006 	req.sq_fwo_sq_sge =
1007 		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
1008 			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
1009 	req.scq_cid = cpu_to_le32(qp->scq->id);
1010 
1011 	/* RQ */
1012 	if (!qp->srq) {
1013 		hwq_attr.res = res;
1014 		hwq_attr.sginfo = &rq->sg_info;
1015 		hwq_attr.stride = sizeof(struct sq_sge);
1016 		hwq_attr.depth = bnxt_qplib_get_depth(rq);
1017 		hwq_attr.aux_stride = 0;
1018 		hwq_attr.aux_depth = 0;
1019 		hwq_attr.type = HWQ_TYPE_QUEUE;
1020 		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
1021 		if (rc)
1022 			goto sq_swq;
1023 		rc = bnxt_qplib_alloc_init_swq(rq);
1024 		if (rc)
1025 			goto fail_rq;
1026 
1027 		req.rq_size = cpu_to_le32(rq->max_wqe);
1028 		pbl = &rq->hwq.pbl[PBL_LVL_0];
1029 		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1030 		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
1031 			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
1032 		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
1033 		req.rq_pg_size_rq_lvl = pg_sz_lvl;
1034 		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1035 			6 : rq->max_sge;
1036 		req.rq_fwo_rq_sge =
1037 			cpu_to_le16(((nsge &
1038 				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
1039 				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
1040 	} else {
1041 		/* SRQ */
1042 		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
1043 		req.srq_cid = cpu_to_le32(qp->srq->id);
1044 	}
1045 	req.rcq_cid = cpu_to_le32(qp->rcq->id);
1046 
1047 	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
1048 	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
1049 	if (qp->sig_type)
1050 		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
1051 	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
1052 		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
1053 	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
1054 		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
1055 
1056 	req.qp_flags = cpu_to_le32(qp_flags);
1057 
1058 	/* ORRQ and IRRQ */
1059 	if (psn_sz) {
1060 		xrrq = &qp->orrq;
1061 		xrrq->max_elements =
1062 			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1063 		req_size = xrrq->max_elements *
1064 			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1065 		req_size &= ~(PAGE_SIZE - 1);
1066 		sginfo.pgsize = req_size;
1067 		sginfo.pgshft = PAGE_SHIFT;
1068 
1069 		hwq_attr.res = res;
1070 		hwq_attr.sginfo = &sginfo;
1071 		hwq_attr.depth = xrrq->max_elements;
1072 		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
1073 		hwq_attr.aux_stride = 0;
1074 		hwq_attr.aux_depth = 0;
1075 		hwq_attr.type = HWQ_TYPE_CTX;
1076 		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
1077 		if (rc)
1078 			goto rq_swq;
1079 		pbl = &xrrq->pbl[PBL_LVL_0];
1080 		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1081 
1082 		xrrq = &qp->irrq;
1083 		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
1084 						qp->max_dest_rd_atomic);
1085 		req_size = xrrq->max_elements *
1086 			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1087 		req_size &= ~(PAGE_SIZE - 1);
1088 		sginfo.pgsize = req_size;
1089 		hwq_attr.depth =  xrrq->max_elements;
1090 		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
1091 		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
1092 		if (rc)
1093 			goto fail_orrq;
1094 
1095 		pbl = &xrrq->pbl[PBL_LVL_0];
1096 		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1097 	}
1098 	req.pd_id = cpu_to_le32(qp->pd->id);
1099 
1100 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1101 					  (void *)&resp, NULL, 0);
1102 	if (rc)
1103 		goto fail;
1104 
1105 	qp->id = le32_to_cpu(resp.xid);
1106 	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
1107 	INIT_LIST_HEAD(&qp->sq_flush);
1108 	INIT_LIST_HEAD(&qp->rq_flush);
1109 	qp->cctx = res->cctx;
1110 	sq->dbinfo.hwq = &sq->hwq;
1111 	sq->dbinfo.xid = qp->id;
1112 	sq->dbinfo.db = qp->dpi->dbr;
1113 	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
1114 	if (rq->max_wqe) {
1115 		rq->dbinfo.hwq = &rq->hwq;
1116 		rq->dbinfo.xid = qp->id;
1117 		rq->dbinfo.db = qp->dpi->dbr;
1118 		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
1119 	}
1120 	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1121 	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1122 	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
1123 
1124 	return 0;
1125 fail:
1126 	bnxt_qplib_free_hwq(res, &qp->irrq);
1127 fail_orrq:
1128 	bnxt_qplib_free_hwq(res, &qp->orrq);
1129 rq_swq:
1130 	kfree(rq->swq);
1131 fail_rq:
1132 	bnxt_qplib_free_hwq(res, &rq->hwq);
1133 sq_swq:
1134 	kfree(sq->swq);
1135 fail_sq:
1136 	bnxt_qplib_free_hwq(res, &sq->hwq);
1137 exit:
1138 	return rc;
1139 }
1140 
1141 static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
1142 {
1143 	switch (qp->state) {
1144 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1145 		/* INIT->RTR, configure the path_mtu to the default
1146 		 * 2048 if not being requested
1147 		 */
1148 		if (!(qp->modify_flags &
1149 		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
1150 			qp->modify_flags |=
1151 				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1152 			qp->path_mtu =
1153 				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1154 		}
1155 		qp->modify_flags &=
1156 			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1157 		/* Bono FW require the max_dest_rd_atomic to be >= 1 */
1158 		if (qp->max_dest_rd_atomic < 1)
1159 			qp->max_dest_rd_atomic = 1;
1160 		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
1161 		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
1162 		if (!(qp->modify_flags &
1163 		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
1164 			qp->modify_flags |=
1165 				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
1166 			qp->ah.sgid_index = 0;
1167 		}
1168 		break;
1169 	default:
1170 		break;
1171 	}
1172 }
1173 
1174 static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
1175 {
1176 	switch (qp->state) {
1177 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1178 		/* Bono FW requires the max_rd_atomic to be >= 1 */
1179 		if (qp->max_rd_atomic < 1)
1180 			qp->max_rd_atomic = 1;
1181 		/* Bono FW does not allow PKEY_INDEX,
1182 		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
1183 		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
1184 		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
1185 		 * modification
1186 		 */
1187 		qp->modify_flags &=
1188 			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
1189 			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1190 			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1191 			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1192 			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1193 			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1194 			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1195 			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
1196 			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
1197 			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
1198 			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
1199 			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
1200 		break;
1201 	default:
1202 		break;
1203 	}
1204 }
1205 
1206 static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
1207 {
1208 	switch (qp->cur_qp_state) {
1209 	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1210 		break;
1211 	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1212 		__modify_flags_from_init_state(qp);
1213 		break;
1214 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1215 		__modify_flags_from_rtr_state(qp);
1216 		break;
1217 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1218 		break;
1219 	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1220 		break;
1221 	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1222 		break;
1223 	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1224 		break;
1225 	default:
1226 		break;
1227 	}
1228 }
1229 
1230 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1231 {
1232 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1233 	struct cmdq_modify_qp req;
1234 	struct creq_modify_qp_resp resp;
1235 	u16 cmd_flags = 0, pkey;
1236 	u32 temp32[4];
1237 	u32 bmask;
1238 	int rc;
1239 
1240 	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
1241 
1242 	/* Filter out the qp_attr_mask based on the state->new transition */
1243 	__filter_modify_flags(qp);
1244 	bmask = qp->modify_flags;
1245 	req.modify_mask = cpu_to_le32(qp->modify_flags);
1246 	req.qp_cid = cpu_to_le32(qp->id);
1247 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
1248 		req.network_type_en_sqd_async_notify_new_state =
1249 				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
1250 				(qp->en_sqd_async_notify ?
1251 					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
1252 	}
1253 	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
1254 
1255 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
1256 		req.access = qp->access;
1257 
1258 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
1259 		if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
1260 					 qp->pkey_index, &pkey))
1261 			req.pkey = cpu_to_le16(pkey);
1262 	}
1263 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
1264 		req.qkey = cpu_to_le32(qp->qkey);
1265 
1266 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
1267 		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
1268 		req.dgid[0] = cpu_to_le32(temp32[0]);
1269 		req.dgid[1] = cpu_to_le32(temp32[1]);
1270 		req.dgid[2] = cpu_to_le32(temp32[2]);
1271 		req.dgid[3] = cpu_to_le32(temp32[3]);
1272 	}
1273 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
1274 		req.flow_label = cpu_to_le32(qp->ah.flow_label);
1275 
1276 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
1277 		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
1278 					     [qp->ah.sgid_index]);
1279 
1280 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
1281 		req.hop_limit = qp->ah.hop_limit;
1282 
1283 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
1284 		req.traffic_class = qp->ah.traffic_class;
1285 
1286 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
1287 		memcpy(req.dest_mac, qp->ah.dmac, 6);
1288 
1289 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
1290 		req.path_mtu = qp->path_mtu;
1291 
1292 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
1293 		req.timeout = qp->timeout;
1294 
1295 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
1296 		req.retry_cnt = qp->retry_cnt;
1297 
1298 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
1299 		req.rnr_retry = qp->rnr_retry;
1300 
1301 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
1302 		req.min_rnr_timer = qp->min_rnr_timer;
1303 
1304 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
1305 		req.rq_psn = cpu_to_le32(qp->rq.psn);
1306 
1307 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
1308 		req.sq_psn = cpu_to_le32(qp->sq.psn);
1309 
1310 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
1311 		req.max_rd_atomic =
1312 			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1313 
1314 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
1315 		req.max_dest_rd_atomic =
1316 			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1317 
1318 	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1319 	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1320 	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1321 	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1322 	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1323 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1324 		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1325 
1326 	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1327 
1328 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1329 					  (void *)&resp, NULL, 0);
1330 	if (rc)
1331 		return rc;
1332 	qp->cur_qp_state = qp->state;
1333 	return 0;
1334 }
1335 
1336 int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1337 {
1338 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1339 	struct cmdq_query_qp req;
1340 	struct creq_query_qp_resp resp;
1341 	struct bnxt_qplib_rcfw_sbuf *sbuf;
1342 	struct creq_query_qp_resp_sb *sb;
1343 	u16 cmd_flags = 0;
1344 	u32 temp32[4];
1345 	int i, rc = 0;
1346 
1347 	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
1348 
1349 	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
1350 	if (!sbuf)
1351 		return -ENOMEM;
1352 	sb = sbuf->sb;
1353 
1354 	req.qp_cid = cpu_to_le32(qp->id);
1355 	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
1356 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
1357 					  (void *)sbuf, 0);
1358 	if (rc)
1359 		goto bail;
1360 	/* Extract the context from the side buffer */
1361 	qp->state = sb->en_sqd_async_notify_state &
1362 			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1363 	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1364 				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
1365 				  true : false;
1366 	qp->access = sb->access;
1367 	qp->pkey_index = le16_to_cpu(sb->pkey);
1368 	qp->qkey = le32_to_cpu(sb->qkey);
1369 
1370 	temp32[0] = le32_to_cpu(sb->dgid[0]);
1371 	temp32[1] = le32_to_cpu(sb->dgid[1]);
1372 	temp32[2] = le32_to_cpu(sb->dgid[2]);
1373 	temp32[3] = le32_to_cpu(sb->dgid[3]);
1374 	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1375 
1376 	qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1377 
1378 	qp->ah.sgid_index = 0;
1379 	for (i = 0; i < res->sgid_tbl.max; i++) {
1380 		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1381 			qp->ah.sgid_index = i;
1382 			break;
1383 		}
1384 	}
1385 	if (i == res->sgid_tbl.max)
1386 		dev_warn(&res->pdev->dev, "SGID not found??\n");
1387 
1388 	qp->ah.hop_limit = sb->hop_limit;
1389 	qp->ah.traffic_class = sb->traffic_class;
1390 	memcpy(qp->ah.dmac, sb->dest_mac, 6);
1391 	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1392 				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
1393 				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1394 	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1395 				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
1396 				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
1397 	qp->timeout = sb->timeout;
1398 	qp->retry_cnt = sb->retry_cnt;
1399 	qp->rnr_retry = sb->rnr_retry;
1400 	qp->min_rnr_timer = sb->min_rnr_timer;
1401 	qp->rq.psn = le32_to_cpu(sb->rq_psn);
1402 	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1403 	qp->sq.psn = le32_to_cpu(sb->sq_psn);
1404 	qp->max_dest_rd_atomic =
1405 			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1406 	qp->sq.max_wqe = qp->sq.hwq.max_elements;
1407 	qp->rq.max_wqe = qp->rq.hwq.max_elements;
1408 	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1409 	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1410 	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1411 	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1412 	memcpy(qp->smac, sb->src_mac, 6);
1413 	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1414 bail:
1415 	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
1416 	return rc;
1417 }
1418 
1419 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1420 {
1421 	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1422 	struct cq_base *hw_cqe;
1423 	int i;
1424 
1425 	for (i = 0; i < cq_hwq->max_elements; i++) {
1426 		hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
1427 		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
1428 			continue;
1429 		/*
1430 		 * The valid test of the entry must be done first before
1431 		 * reading any further.
1432 		 */
1433 		dma_rmb();
1434 		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1435 		case CQ_BASE_CQE_TYPE_REQ:
1436 		case CQ_BASE_CQE_TYPE_TERMINAL:
1437 		{
1438 			struct cq_req *cqe = (struct cq_req *)hw_cqe;
1439 
1440 			if (qp == le64_to_cpu(cqe->qp_handle))
1441 				cqe->qp_handle = 0;
1442 			break;
1443 		}
1444 		case CQ_BASE_CQE_TYPE_RES_RC:
1445 		case CQ_BASE_CQE_TYPE_RES_UD:
1446 		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1447 		{
1448 			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1449 
1450 			if (qp == le64_to_cpu(cqe->qp_handle))
1451 				cqe->qp_handle = 0;
1452 			break;
1453 		}
1454 		default:
1455 			break;
1456 		}
1457 	}
1458 }
1459 
1460 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1461 			  struct bnxt_qplib_qp *qp)
1462 {
1463 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1464 	struct cmdq_destroy_qp req;
1465 	struct creq_destroy_qp_resp resp;
1466 	u16 cmd_flags = 0;
1467 	u32 tbl_indx;
1468 	int rc;
1469 
1470 	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1471 	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1472 	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
1473 
1474 	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
1475 
1476 	req.qp_cid = cpu_to_le32(qp->id);
1477 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1478 					  (void *)&resp, NULL, 0);
1479 	if (rc) {
1480 		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1481 		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
1482 		return rc;
1483 	}
1484 
1485 	return 0;
1486 }
1487 
1488 void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
1489 			    struct bnxt_qplib_qp *qp)
1490 {
1491 	bnxt_qplib_free_qp_hdr_buf(res, qp);
1492 	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
1493 	kfree(qp->sq.swq);
1494 
1495 	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
1496 	kfree(qp->rq.swq);
1497 
1498 	if (qp->irrq.max_elements)
1499 		bnxt_qplib_free_hwq(res, &qp->irrq);
1500 	if (qp->orrq.max_elements)
1501 		bnxt_qplib_free_hwq(res, &qp->orrq);
1502 
1503 }
1504 
1505 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1506 				struct bnxt_qplib_sge *sge)
1507 {
1508 	struct bnxt_qplib_q *sq = &qp->sq;
1509 	u32 sw_prod;
1510 
1511 	memset(sge, 0, sizeof(*sge));
1512 
1513 	if (qp->sq_hdr_buf) {
1514 		sw_prod = sq->swq_start;
1515 		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1516 					 sw_prod * qp->sq_hdr_buf_size);
1517 		sge->lkey = 0xFFFFFFFF;
1518 		sge->size = qp->sq_hdr_buf_size;
1519 		return qp->sq_hdr_buf + sw_prod * sge->size;
1520 	}
1521 	return NULL;
1522 }
1523 
1524 u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1525 {
1526 	struct bnxt_qplib_q *rq = &qp->rq;
1527 
1528 	return rq->swq_start;
1529 }
1530 
1531 dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1532 {
1533 	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1534 }
1535 
1536 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1537 				struct bnxt_qplib_sge *sge)
1538 {
1539 	struct bnxt_qplib_q *rq = &qp->rq;
1540 	u32 sw_prod;
1541 
1542 	memset(sge, 0, sizeof(*sge));
1543 
1544 	if (qp->rq_hdr_buf) {
1545 		sw_prod = rq->swq_start;
1546 		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1547 					 sw_prod * qp->rq_hdr_buf_size);
1548 		sge->lkey = 0xFFFFFFFF;
1549 		sge->size = qp->rq_hdr_buf_size;
1550 		return qp->rq_hdr_buf + sw_prod * sge->size;
1551 	}
1552 	return NULL;
1553 }
1554 
1555 static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
1556 				       struct bnxt_qplib_swqe *wqe,
1557 				       struct bnxt_qplib_swq *swq)
1558 {
1559 	struct sq_psn_search_ext *psns_ext;
1560 	struct sq_psn_search *psns;
1561 	u32 flg_npsn;
1562 	u32 op_spsn;
1563 
1564 	if (!swq->psn_search)
1565 		return;
1566 	psns = swq->psn_search;
1567 	psns_ext = swq->psn_ext;
1568 
1569 	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1570 		    SQ_PSN_SEARCH_START_PSN_MASK);
1571 	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1572 		     SQ_PSN_SEARCH_OPCODE_MASK);
1573 	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1574 		     SQ_PSN_SEARCH_NEXT_PSN_MASK);
1575 
1576 	if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
1577 		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
1578 		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
1579 		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
1580 	} else {
1581 		psns->opcode_start_psn = cpu_to_le32(op_spsn);
1582 		psns->flags_next_psn = cpu_to_le32(flg_npsn);
1583 	}
1584 }
1585 
1586 static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
1587 				 struct bnxt_qplib_swqe *wqe,
1588 				 u16 *idx)
1589 {
1590 	struct bnxt_qplib_hwq *hwq;
1591 	int len, t_len, offt;
1592 	bool pull_dst = true;
1593 	void *il_dst = NULL;
1594 	void *il_src = NULL;
1595 	int t_cplen, cplen;
1596 	int indx;
1597 
1598 	hwq = &qp->sq.hwq;
1599 	t_len = 0;
1600 	for (indx = 0; indx < wqe->num_sge; indx++) {
1601 		len = wqe->sg_list[indx].size;
1602 		il_src = (void *)wqe->sg_list[indx].addr;
1603 		t_len += len;
1604 		if (t_len > qp->max_inline_data)
1605 			goto bad;
1606 		while (len) {
1607 			if (pull_dst) {
1608 				pull_dst = false;
1609 				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
1610 				(*idx)++;
1611 				t_cplen = 0;
1612 				offt = 0;
1613 			}
1614 			cplen = min_t(int, len, sizeof(struct sq_sge));
1615 			cplen = min_t(int, cplen,
1616 					(sizeof(struct sq_sge) - offt));
1617 			memcpy(il_dst, il_src, cplen);
1618 			t_cplen += cplen;
1619 			il_src += cplen;
1620 			il_dst += cplen;
1621 			offt += cplen;
1622 			len -= cplen;
1623 			if (t_cplen == sizeof(struct sq_sge))
1624 				pull_dst = true;
1625 		}
1626 	}
1627 
1628 	return t_len;
1629 bad:
1630 	return -ENOMEM;
1631 }
1632 
1633 static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
1634 			       struct bnxt_qplib_sge *ssge,
1635 			       u16 nsge, u16 *idx)
1636 {
1637 	struct sq_sge *dsge;
1638 	int indx, len = 0;
1639 
1640 	for (indx = 0; indx < nsge; indx++, (*idx)++) {
1641 		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
1642 		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
1643 		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
1644 		dsge->size = cpu_to_le32(ssge[indx].size);
1645 		len += ssge[indx].size;
1646 	}
1647 
1648 	return len;
1649 }
1650 
1651 static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
1652 				     struct bnxt_qplib_swqe *wqe,
1653 				     u16 *wqe_sz, u16 *qdf, u8 mode)
1654 {
1655 	u32 ilsize, bytes;
1656 	u16 nsge;
1657 	u16 slot;
1658 
1659 	nsge = wqe->num_sge;
1660 	/* Adding sq_send_hdr is a misnomer, for rq also hdr size is same. */
1661 	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
1662 	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1663 		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
1664 		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
1665 		bytes += sizeof(struct sq_send_hdr);
1666 	}
1667 
1668 	*qdf =  __xlate_qfd(qp->sq.q_full_delta, bytes);
1669 	slot = bytes >> 4;
1670 	*wqe_sz = slot;
1671 	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
1672 		slot = 8;
1673 	return slot;
1674 }
1675 
1676 static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
1677 				     struct bnxt_qplib_swq *swq)
1678 {
1679 	struct bnxt_qplib_hwq *hwq;
1680 	u32 pg_num, pg_indx;
1681 	void *buff;
1682 	u32 tail;
1683 
1684 	hwq = &sq->hwq;
1685 	if (!hwq->pad_pg)
1686 		return;
1687 	tail = swq->slot_idx / sq->dbinfo.max_slot;
1688 	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
1689 	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
1690 	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
1691 	swq->psn_ext = buff;
1692 	swq->psn_search = buff;
1693 }
1694 
1695 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1696 {
1697 	struct bnxt_qplib_q *sq = &qp->sq;
1698 
1699 	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1700 }
1701 
1702 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1703 			 struct bnxt_qplib_swqe *wqe)
1704 {
1705 	struct bnxt_qplib_nq_work *nq_work = NULL;
1706 	int i, rc = 0, data_len = 0, pkt_num = 0;
1707 	struct bnxt_qplib_q *sq = &qp->sq;
1708 	struct bnxt_qplib_hwq *hwq;
1709 	struct bnxt_qplib_swq *swq;
1710 	bool sch_handler = false;
1711 	u16 wqe_sz, qdf = 0;
1712 	void *base_hdr;
1713 	void *ext_hdr;
1714 	__le32 temp32;
1715 	u32 wqe_idx;
1716 	u32 slots;
1717 	u16 idx;
1718 
1719 	hwq = &sq->hwq;
1720 	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1721 	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1722 		dev_err(&hwq->pdev->dev,
1723 			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
1724 			qp->id, qp->state);
1725 		rc = -EINVAL;
1726 		goto done;
1727 	}
1728 
1729 	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1730 	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1731 		dev_err(&hwq->pdev->dev,
1732 			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1733 			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1734 		rc = -ENOMEM;
1735 		goto done;
1736 	}
1737 
1738 	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1739 	bnxt_qplib_pull_psn_buff(sq, swq);
1740 
1741 	idx = 0;
1742 	swq->slot_idx = hwq->prod;
1743 	swq->slots = slots;
1744 	swq->wr_id = wqe->wr_id;
1745 	swq->type = wqe->type;
1746 	swq->flags = wqe->flags;
1747 	swq->start_psn = sq->psn & BTH_PSN_MASK;
1748 	if (qp->sig_type)
1749 		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1750 
1751 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1752 		sch_handler = true;
1753 		dev_dbg(&hwq->pdev->dev,
1754 			"%s Error QP. Scheduling for poll_cq\n", __func__);
1755 		goto queue_err;
1756 	}
1757 
1758 	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1759 	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1760 	memset(base_hdr, 0, sizeof(struct sq_sge));
1761 	memset(ext_hdr, 0, sizeof(struct sq_sge));
1762 
1763 	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1764 		/* Copy the inline data */
1765 		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1766 	else
1767 		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1768 					       &idx);
1769 	if (data_len < 0)
1770 		goto queue_err;
1771 	/* Specifics */
1772 	switch (wqe->type) {
1773 	case BNXT_QPLIB_SWQE_TYPE_SEND:
1774 		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1775 			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1776 			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
1777 			/* Assemble info for Raw Ethertype QPs */
1778 
1779 			sqe->wqe_type = wqe->type;
1780 			sqe->flags = wqe->flags;
1781 			sqe->wqe_size = wqe_sz;
1782 			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1783 			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1784 			sqe->length = cpu_to_le32(data_len);
1785 			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1786 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1787 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1788 
1789 			break;
1790 		}
1791 		fallthrough;
1792 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1793 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1794 	{
1795 		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1796 		struct sq_send_hdr *sqe = base_hdr;
1797 
1798 		sqe->wqe_type = wqe->type;
1799 		sqe->flags = wqe->flags;
1800 		sqe->wqe_size = wqe_sz;
1801 		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1802 		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1803 		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1804 			sqe->q_key = cpu_to_le32(wqe->send.q_key);
1805 			sqe->length = cpu_to_le32(data_len);
1806 			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1807 			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1808 						      SQ_SEND_DST_QP_MASK);
1809 			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1810 						    SQ_SEND_AVID_MASK);
1811 		} else {
1812 			sqe->length = cpu_to_le32(data_len);
1813 			if (qp->mtu)
1814 				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1815 			if (!pkt_num)
1816 				pkt_num = 1;
1817 			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1818 		}
1819 		break;
1820 	}
1821 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1822 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1823 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1824 	{
1825 		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
1826 		struct sq_rdma_hdr *sqe = base_hdr;
1827 
1828 		sqe->wqe_type = wqe->type;
1829 		sqe->flags = wqe->flags;
1830 		sqe->wqe_size = wqe_sz;
1831 		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1832 		sqe->length = cpu_to_le32((u32)data_len);
1833 		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1834 		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1835 		if (qp->mtu)
1836 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1837 		if (!pkt_num)
1838 			pkt_num = 1;
1839 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1840 		break;
1841 	}
1842 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1843 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1844 	{
1845 		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
1846 		struct sq_atomic_hdr *sqe = base_hdr;
1847 
1848 		sqe->wqe_type = wqe->type;
1849 		sqe->flags = wqe->flags;
1850 		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1851 		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1852 		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1853 		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1854 		if (qp->mtu)
1855 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1856 		if (!pkt_num)
1857 			pkt_num = 1;
1858 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1859 		break;
1860 	}
1861 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1862 	{
1863 		struct sq_localinvalidate *sqe = base_hdr;
1864 
1865 		sqe->wqe_type = wqe->type;
1866 		sqe->flags = wqe->flags;
1867 		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1868 
1869 		break;
1870 	}
1871 	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1872 	{
1873 		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
1874 		struct sq_fr_pmr_hdr *sqe = base_hdr;
1875 
1876 		sqe->wqe_type = wqe->type;
1877 		sqe->flags = wqe->flags;
1878 		sqe->access_cntl = wqe->frmr.access_cntl |
1879 				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1880 		sqe->zero_based_page_size_log =
1881 			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1882 			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1883 			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1884 		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1885 		temp32 = cpu_to_le32(wqe->frmr.length);
1886 		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1887 		sqe->numlevels_pbl_page_size_log =
1888 			((wqe->frmr.pbl_pg_sz_log <<
1889 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1890 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1891 			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1892 					SQ_FR_PMR_NUMLEVELS_MASK);
1893 
1894 		for (i = 0; i < wqe->frmr.page_list_len; i++)
1895 			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1896 						wqe->frmr.page_list[i] |
1897 						PTU_PTE_VALID);
1898 		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1899 		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
1900 
1901 		break;
1902 	}
1903 	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1904 	{
1905 		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
1906 		struct sq_bind_hdr *sqe = base_hdr;
1907 
1908 		sqe->wqe_type = wqe->type;
1909 		sqe->flags = wqe->flags;
1910 		sqe->access_cntl = wqe->bind.access_cntl;
1911 		sqe->mw_type_zero_based = wqe->bind.mw_type |
1912 			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1913 		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1914 		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1915 		ext_sqe->va = cpu_to_le64(wqe->bind.va);
1916 		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
1917 		break;
1918 	}
1919 	default:
1920 		/* Bad wqe, return error */
1921 		rc = -EINVAL;
1922 		goto done;
1923 	}
1924 	swq->next_psn = sq->psn & BTH_PSN_MASK;
1925 	bnxt_qplib_fill_psn_search(qp, wqe, swq);
1926 queue_err:
1927 	bnxt_qplib_swq_mod_start(sq, wqe_idx);
1928 	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
1929 	qp->wqe_cnt++;
1930 done:
1931 	if (sch_handler) {
1932 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1933 		if (nq_work) {
1934 			nq_work->cq = qp->scq;
1935 			nq_work->nq = qp->scq->nq;
1936 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1937 			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
1938 		} else {
1939 			dev_err(&hwq->pdev->dev,
1940 				"FP: Failed to allocate SQ nq_work!\n");
1941 			rc = -ENOMEM;
1942 		}
1943 	}
1944 	return rc;
1945 }
1946 
1947 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
1948 {
1949 	struct bnxt_qplib_q *rq = &qp->rq;
1950 
1951 	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
1952 }
1953 
1954 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1955 			 struct bnxt_qplib_swqe *wqe)
1956 {
1957 	struct bnxt_qplib_nq_work *nq_work = NULL;
1958 	struct bnxt_qplib_q *rq = &qp->rq;
1959 	struct rq_wqe_hdr *base_hdr;
1960 	struct rq_ext_hdr *ext_hdr;
1961 	struct bnxt_qplib_hwq *hwq;
1962 	struct bnxt_qplib_swq *swq;
1963 	bool sch_handler = false;
1964 	u16 wqe_sz, idx;
1965 	u32 wqe_idx;
1966 	int rc = 0;
1967 
1968 	hwq = &rq->hwq;
1969 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1970 		dev_err(&hwq->pdev->dev,
1971 			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
1972 			qp->id, qp->state);
1973 		rc = -EINVAL;
1974 		goto done;
1975 	}
1976 
1977 	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
1978 		dev_err(&hwq->pdev->dev,
1979 			"FP: QP (0x%x) RQ is full!\n", qp->id);
1980 		rc = -EINVAL;
1981 		goto done;
1982 	}
1983 
1984 	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
1985 	swq->wr_id = wqe->wr_id;
1986 	swq->slots = rq->dbinfo.max_slot;
1987 
1988 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1989 		sch_handler = true;
1990 		dev_dbg(&hwq->pdev->dev,
1991 			"%s: Error QP. Scheduling for poll_cq\n", __func__);
1992 		goto queue_err;
1993 	}
1994 
1995 	idx = 0;
1996 	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1997 	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1998 	memset(base_hdr, 0, sizeof(struct sq_sge));
1999 	memset(ext_hdr, 0, sizeof(struct sq_sge));
2000 	wqe_sz = (sizeof(struct rq_wqe_hdr) +
2001 	wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2002 	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2003 	if (!wqe->num_sge) {
2004 		struct sq_sge *sge;
2005 
2006 		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2007 		sge->size = 0;
2008 		wqe_sz++;
2009 	}
2010 	base_hdr->wqe_type = wqe->type;
2011 	base_hdr->flags = wqe->flags;
2012 	base_hdr->wqe_size = wqe_sz;
2013 	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2014 queue_err:
2015 	bnxt_qplib_swq_mod_start(rq, wqe_idx);
2016 	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
2017 done:
2018 	if (sch_handler) {
2019 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2020 		if (nq_work) {
2021 			nq_work->cq = qp->rcq;
2022 			nq_work->nq = qp->rcq->nq;
2023 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2024 			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2025 		} else {
2026 			dev_err(&hwq->pdev->dev,
2027 				"FP: Failed to allocate RQ nq_work!\n");
2028 			rc = -ENOMEM;
2029 		}
2030 	}
2031 
2032 	return rc;
2033 }
2034 
2035 /* CQ */
2036 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2037 {
2038 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2039 	struct bnxt_qplib_hwq_attr hwq_attr = {};
2040 	struct creq_create_cq_resp resp;
2041 	struct bnxt_qplib_pbl *pbl;
2042 	struct cmdq_create_cq req;
2043 	u16 cmd_flags = 0;
2044 	u32 pg_sz_lvl;
2045 	int rc;
2046 
2047 	hwq_attr.res = res;
2048 	hwq_attr.depth = cq->max_wqe;
2049 	hwq_attr.stride = sizeof(struct cq_base);
2050 	hwq_attr.type = HWQ_TYPE_QUEUE;
2051 	hwq_attr.sginfo = &cq->sg_info;
2052 	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2053 	if (rc)
2054 		goto exit;
2055 
2056 	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
2057 
2058 	if (!cq->dpi) {
2059 		dev_err(&rcfw->pdev->dev,
2060 			"FP: CREATE_CQ failed due to NULL DPI\n");
2061 		return -EINVAL;
2062 	}
2063 	req.dpi = cpu_to_le32(cq->dpi->dpi);
2064 	req.cq_handle = cpu_to_le64(cq->cq_handle);
2065 	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
2066 	pbl = &cq->hwq.pbl[PBL_LVL_0];
2067 	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2068 		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
2069 	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2070 	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2071 	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2072 	req.cq_fco_cnq_id = cpu_to_le32(
2073 			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2074 			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2075 
2076 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
2077 					  (void *)&resp, NULL, 0);
2078 	if (rc)
2079 		goto fail;
2080 
2081 	cq->id = le32_to_cpu(resp.xid);
2082 	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2083 	init_waitqueue_head(&cq->waitq);
2084 	INIT_LIST_HEAD(&cq->sqf_head);
2085 	INIT_LIST_HEAD(&cq->rqf_head);
2086 	spin_lock_init(&cq->compl_lock);
2087 	spin_lock_init(&cq->flush_lock);
2088 
2089 	cq->dbinfo.hwq = &cq->hwq;
2090 	cq->dbinfo.xid = cq->id;
2091 	cq->dbinfo.db = cq->dpi->dbr;
2092 	cq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
2093 
2094 	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2095 
2096 	return 0;
2097 
2098 fail:
2099 	bnxt_qplib_free_hwq(res, &cq->hwq);
2100 exit:
2101 	return rc;
2102 }
2103 
2104 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2105 {
2106 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2107 	struct cmdq_destroy_cq req;
2108 	struct creq_destroy_cq_resp resp;
2109 	u16 total_cnq_events;
2110 	u16 cmd_flags = 0;
2111 	int rc;
2112 
2113 	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
2114 
2115 	req.cq_cid = cpu_to_le32(cq->id);
2116 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
2117 					  (void *)&resp, NULL, 0);
2118 	if (rc)
2119 		return rc;
2120 	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2121 	__wait_for_all_nqes(cq, total_cnq_events);
2122 	bnxt_qplib_free_hwq(res, &cq->hwq);
2123 	return 0;
2124 }
2125 
2126 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2127 		      struct bnxt_qplib_cqe **pcqe, int *budget)
2128 {
2129 	struct bnxt_qplib_cqe *cqe;
2130 	u32 start, last;
2131 	int rc = 0;
2132 
2133 	/* Now complete all outstanding SQEs with FLUSHED_ERR */
2134 	start = sq->swq_start;
2135 	cqe = *pcqe;
2136 	while (*budget) {
2137 		last = sq->swq_last;
2138 		if (start == last)
2139 			break;
2140 		/* Skip the FENCE WQE completions */
2141 		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2142 			bnxt_qplib_cancel_phantom_processing(qp);
2143 			goto skip_compl;
2144 		}
2145 		memset(cqe, 0, sizeof(*cqe));
2146 		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2147 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2148 		cqe->qp_handle = (u64)(unsigned long)qp;
2149 		cqe->wr_id = sq->swq[last].wr_id;
2150 		cqe->src_qp = qp->id;
2151 		cqe->type = sq->swq[last].type;
2152 		cqe++;
2153 		(*budget)--;
2154 skip_compl:
2155 		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots);
2156 		sq->swq_last = sq->swq[last].next_idx;
2157 	}
2158 	*pcqe = cqe;
2159 	if (!(*budget) && sq->swq_last != start)
2160 		/* Out of budget */
2161 		rc = -EAGAIN;
2162 
2163 	return rc;
2164 }
2165 
2166 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2167 		      struct bnxt_qplib_cqe **pcqe, int *budget)
2168 {
2169 	struct bnxt_qplib_cqe *cqe;
2170 	u32 start, last;
2171 	int opcode = 0;
2172 	int rc = 0;
2173 
2174 	switch (qp->type) {
2175 	case CMDQ_CREATE_QP1_TYPE_GSI:
2176 		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2177 		break;
2178 	case CMDQ_CREATE_QP_TYPE_RC:
2179 		opcode = CQ_BASE_CQE_TYPE_RES_RC;
2180 		break;
2181 	case CMDQ_CREATE_QP_TYPE_UD:
2182 	case CMDQ_CREATE_QP_TYPE_GSI:
2183 		opcode = CQ_BASE_CQE_TYPE_RES_UD;
2184 		break;
2185 	}
2186 
2187 	/* Flush the rest of the RQ */
2188 	start = rq->swq_start;
2189 	cqe = *pcqe;
2190 	while (*budget) {
2191 		last = rq->swq_last;
2192 		if (last == start)
2193 			break;
2194 		memset(cqe, 0, sizeof(*cqe));
2195 		cqe->status =
2196 		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2197 		cqe->opcode = opcode;
2198 		cqe->qp_handle = (unsigned long)qp;
2199 		cqe->wr_id = rq->swq[last].wr_id;
2200 		cqe++;
2201 		(*budget)--;
2202 		bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots);
2203 		rq->swq_last = rq->swq[last].next_idx;
2204 	}
2205 	*pcqe = cqe;
2206 	if (!*budget && rq->swq_last != start)
2207 		/* Out of budget */
2208 		rc = -EAGAIN;
2209 
2210 	return rc;
2211 }
2212 
2213 void bnxt_qplib_mark_qp_error(void *qp_handle)
2214 {
2215 	struct bnxt_qplib_qp *qp = qp_handle;
2216 
2217 	if (!qp)
2218 		return;
2219 
2220 	/* Must block new posting of SQ and RQ */
2221 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2222 	bnxt_qplib_cancel_phantom_processing(qp);
2223 }
2224 
2225 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2226  *       CQE is track from sw_cq_cons to max_element but valid only if VALID=1
2227  */
2228 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2229 		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2230 {
2231 	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
2232 	struct bnxt_qplib_q *sq = &qp->sq;
2233 	struct cq_req *peek_req_hwcqe;
2234 	struct bnxt_qplib_qp *peek_qp;
2235 	struct bnxt_qplib_q *peek_sq;
2236 	struct bnxt_qplib_swq *swq;
2237 	struct cq_base *peek_hwcqe;
2238 	int i, rc = 0;
2239 
2240 	/* Normal mode */
2241 	/* Check for the psn_search marking before completing */
2242 	swq = &sq->swq[swq_last];
2243 	if (swq->psn_search &&
2244 	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2245 		/* Unmark */
2246 		swq->psn_search->flags_next_psn = cpu_to_le32
2247 			(le32_to_cpu(swq->psn_search->flags_next_psn)
2248 				     & ~0x80000000);
2249 		dev_dbg(&cq->hwq.pdev->dev,
2250 			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2251 			cq_cons, qp->id, swq_last, cqe_sq_cons);
2252 		sq->condition = true;
2253 		sq->send_phantom = true;
2254 
2255 		/* TODO: Only ARM if the previous SQE is ARMALL */
2256 		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2257 		rc = -EAGAIN;
2258 		goto out;
2259 	}
2260 	if (sq->condition) {
2261 		/* Peek at the completions */
2262 		peek_raw_cq_cons = cq->hwq.cons;
2263 		peek_sw_cq_cons = cq_cons;
2264 		i = cq->hwq.max_elements;
2265 		while (i--) {
2266 			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
2267 			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2268 						       peek_sw_cq_cons, NULL);
2269 			/* If the next hwcqe is VALID */
2270 			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
2271 					  cq->hwq.max_elements)) {
2272 			/*
2273 			 * The valid test of the entry must be done first before
2274 			 * reading any further.
2275 			 */
2276 				dma_rmb();
2277 				/* If the next hwcqe is a REQ */
2278 				if ((peek_hwcqe->cqe_type_toggle &
2279 				    CQ_BASE_CQE_TYPE_MASK) ==
2280 				    CQ_BASE_CQE_TYPE_REQ) {
2281 					peek_req_hwcqe = (struct cq_req *)
2282 							 peek_hwcqe;
2283 					peek_qp = (struct bnxt_qplib_qp *)
2284 						((unsigned long)
2285 						 le64_to_cpu
2286 						 (peek_req_hwcqe->qp_handle));
2287 					peek_sq = &peek_qp->sq;
2288 					peek_sq_cons_idx =
2289 						((le16_to_cpu(
2290 						  peek_req_hwcqe->sq_cons_idx)
2291 						  - 1) % sq->max_wqe);
2292 					/* If the hwcqe's sq's wr_id matches */
2293 					if (peek_sq == sq &&
2294 					    sq->swq[peek_sq_cons_idx].wr_id ==
2295 					    BNXT_QPLIB_FENCE_WRID) {
2296 						/*
2297 						 *  Unbreak only if the phantom
2298 						 *  comes back
2299 						 */
2300 						dev_dbg(&cq->hwq.pdev->dev,
2301 							"FP: Got Phantom CQE\n");
2302 						sq->condition = false;
2303 						sq->single = true;
2304 						rc = 0;
2305 						goto out;
2306 					}
2307 				}
2308 				/* Valid but not the phantom, so keep looping */
2309 			} else {
2310 				/* Not valid yet, just exit and wait */
2311 				rc = -EINVAL;
2312 				goto out;
2313 			}
2314 			peek_sw_cq_cons++;
2315 			peek_raw_cq_cons++;
2316 		}
2317 		dev_err(&cq->hwq.pdev->dev,
2318 			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2319 			cq_cons, qp->id, swq_last, cqe_sq_cons);
2320 		rc = -EINVAL;
2321 	}
2322 out:
2323 	return rc;
2324 }
2325 
2326 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2327 				     struct cq_req *hwcqe,
2328 				     struct bnxt_qplib_cqe **pcqe, int *budget,
2329 				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2330 {
2331 	struct bnxt_qplib_swq *swq;
2332 	struct bnxt_qplib_cqe *cqe;
2333 	struct bnxt_qplib_qp *qp;
2334 	struct bnxt_qplib_q *sq;
2335 	u32 cqe_sq_cons;
2336 	int rc = 0;
2337 
2338 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2339 				      le64_to_cpu(hwcqe->qp_handle));
2340 	if (!qp) {
2341 		dev_err(&cq->hwq.pdev->dev,
2342 			"FP: Process Req qp is NULL\n");
2343 		return -EINVAL;
2344 	}
2345 	sq = &qp->sq;
2346 
2347 	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
2348 	if (qp->sq.flushed) {
2349 		dev_dbg(&cq->hwq.pdev->dev,
2350 			"%s: QP in Flush QP = %p\n", __func__, qp);
2351 		goto done;
2352 	}
2353 	/* Require to walk the sq's swq to fabricate CQEs for all previously
2354 	 * signaled SWQEs due to CQE aggregation from the current sq cons
2355 	 * to the cqe_sq_cons
2356 	 */
2357 	cqe = *pcqe;
2358 	while (*budget) {
2359 		if (sq->swq_last == cqe_sq_cons)
2360 			/* Done */
2361 			break;
2362 
2363 		swq = &sq->swq[sq->swq_last];
2364 		memset(cqe, 0, sizeof(*cqe));
2365 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2366 		cqe->qp_handle = (u64)(unsigned long)qp;
2367 		cqe->src_qp = qp->id;
2368 		cqe->wr_id = swq->wr_id;
2369 		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2370 			goto skip;
2371 		cqe->type = swq->type;
2372 
2373 		/* For the last CQE, check for status.  For errors, regardless
2374 		 * of the request being signaled or not, it must complete with
2375 		 * the hwcqe error status
2376 		 */
2377 		if (swq->next_idx == cqe_sq_cons &&
2378 		    hwcqe->status != CQ_REQ_STATUS_OK) {
2379 			cqe->status = hwcqe->status;
2380 			dev_err(&cq->hwq.pdev->dev,
2381 				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2382 				sq->swq_last, cqe->wr_id, cqe->status);
2383 			cqe++;
2384 			(*budget)--;
2385 			bnxt_qplib_mark_qp_error(qp);
2386 			/* Add qp to flush list of the CQ */
2387 			bnxt_qplib_add_flush_qp(qp);
2388 		} else {
2389 			/* Before we complete, do WA 9060 */
2390 			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2391 				      cqe_sq_cons)) {
2392 				*lib_qp = qp;
2393 				goto out;
2394 			}
2395 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2396 				cqe->status = CQ_REQ_STATUS_OK;
2397 				cqe++;
2398 				(*budget)--;
2399 			}
2400 		}
2401 skip:
2402 		bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
2403 		sq->swq_last = swq->next_idx;
2404 		if (sq->single)
2405 			break;
2406 	}
2407 out:
2408 	*pcqe = cqe;
2409 	if (sq->swq_last != cqe_sq_cons) {
2410 		/* Out of budget */
2411 		rc = -EAGAIN;
2412 		goto done;
2413 	}
2414 	/*
2415 	 * Back to normal completion mode only after it has completed all of
2416 	 * the WC for this CQE
2417 	 */
2418 	sq->single = false;
2419 done:
2420 	return rc;
2421 }
2422 
2423 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2424 {
2425 	spin_lock(&srq->hwq.lock);
2426 	srq->swq[srq->last_idx].next_idx = (int)tag;
2427 	srq->last_idx = (int)tag;
2428 	srq->swq[srq->last_idx].next_idx = -1;
2429 	srq->hwq.cons++; /* Support for SRQE counter */
2430 	spin_unlock(&srq->hwq.lock);
2431 }
2432 
2433 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2434 					struct cq_res_rc *hwcqe,
2435 					struct bnxt_qplib_cqe **pcqe,
2436 					int *budget)
2437 {
2438 	struct bnxt_qplib_srq *srq;
2439 	struct bnxt_qplib_cqe *cqe;
2440 	struct bnxt_qplib_qp *qp;
2441 	struct bnxt_qplib_q *rq;
2442 	u32 wr_id_idx;
2443 	int rc = 0;
2444 
2445 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2446 				      le64_to_cpu(hwcqe->qp_handle));
2447 	if (!qp) {
2448 		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2449 		return -EINVAL;
2450 	}
2451 	if (qp->rq.flushed) {
2452 		dev_dbg(&cq->hwq.pdev->dev,
2453 			"%s: QP in Flush QP = %p\n", __func__, qp);
2454 		goto done;
2455 	}
2456 
2457 	cqe = *pcqe;
2458 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2459 	cqe->length = le32_to_cpu(hwcqe->length);
2460 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2461 	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2462 	cqe->flags = le16_to_cpu(hwcqe->flags);
2463 	cqe->status = hwcqe->status;
2464 	cqe->qp_handle = (u64)(unsigned long)qp;
2465 
2466 	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2467 				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2468 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2469 		srq = qp->srq;
2470 		if (!srq)
2471 			return -EINVAL;
2472 		if (wr_id_idx >= srq->hwq.max_elements) {
2473 			dev_err(&cq->hwq.pdev->dev,
2474 				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2475 				wr_id_idx, srq->hwq.max_elements);
2476 			return -EINVAL;
2477 		}
2478 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2479 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2480 		cqe++;
2481 		(*budget)--;
2482 		*pcqe = cqe;
2483 	} else {
2484 		struct bnxt_qplib_swq *swq;
2485 
2486 		rq = &qp->rq;
2487 		if (wr_id_idx > (rq->max_wqe - 1)) {
2488 			dev_err(&cq->hwq.pdev->dev,
2489 				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2490 				wr_id_idx, rq->max_wqe);
2491 			return -EINVAL;
2492 		}
2493 		if (wr_id_idx != rq->swq_last)
2494 			return -EINVAL;
2495 		swq = &rq->swq[rq->swq_last];
2496 		cqe->wr_id = swq->wr_id;
2497 		cqe++;
2498 		(*budget)--;
2499 		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2500 		rq->swq_last = swq->next_idx;
2501 		*pcqe = cqe;
2502 
2503 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2504 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2505 			/* Add qp to flush list of the CQ */
2506 			bnxt_qplib_add_flush_qp(qp);
2507 		}
2508 	}
2509 
2510 done:
2511 	return rc;
2512 }
2513 
2514 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2515 					struct cq_res_ud *hwcqe,
2516 					struct bnxt_qplib_cqe **pcqe,
2517 					int *budget)
2518 {
2519 	struct bnxt_qplib_srq *srq;
2520 	struct bnxt_qplib_cqe *cqe;
2521 	struct bnxt_qplib_qp *qp;
2522 	struct bnxt_qplib_q *rq;
2523 	u32 wr_id_idx;
2524 	int rc = 0;
2525 
2526 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2527 				      le64_to_cpu(hwcqe->qp_handle));
2528 	if (!qp) {
2529 		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2530 		return -EINVAL;
2531 	}
2532 	if (qp->rq.flushed) {
2533 		dev_dbg(&cq->hwq.pdev->dev,
2534 			"%s: QP in Flush QP = %p\n", __func__, qp);
2535 		goto done;
2536 	}
2537 	cqe = *pcqe;
2538 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2539 	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2540 	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2541 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2542 	cqe->flags = le16_to_cpu(hwcqe->flags);
2543 	cqe->status = hwcqe->status;
2544 	cqe->qp_handle = (u64)(unsigned long)qp;
2545 	/*FIXME: Endianness fix needed for smace */
2546 	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2547 	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2548 				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2549 	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2550 				  ((le32_to_cpu(
2551 				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
2552 				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2553 
2554 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2555 		srq = qp->srq;
2556 		if (!srq)
2557 			return -EINVAL;
2558 
2559 		if (wr_id_idx >= srq->hwq.max_elements) {
2560 			dev_err(&cq->hwq.pdev->dev,
2561 				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2562 				wr_id_idx, srq->hwq.max_elements);
2563 			return -EINVAL;
2564 		}
2565 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2566 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2567 		cqe++;
2568 		(*budget)--;
2569 		*pcqe = cqe;
2570 	} else {
2571 		struct bnxt_qplib_swq *swq;
2572 
2573 		rq = &qp->rq;
2574 		if (wr_id_idx > (rq->max_wqe - 1)) {
2575 			dev_err(&cq->hwq.pdev->dev,
2576 				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2577 				wr_id_idx, rq->max_wqe);
2578 			return -EINVAL;
2579 		}
2580 
2581 		if (rq->swq_last != wr_id_idx)
2582 			return -EINVAL;
2583 		swq = &rq->swq[rq->swq_last];
2584 		cqe->wr_id = swq->wr_id;
2585 		cqe++;
2586 		(*budget)--;
2587 		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2588 		rq->swq_last = swq->next_idx;
2589 		*pcqe = cqe;
2590 
2591 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2592 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2593 			/* Add qp to flush list of the CQ */
2594 			bnxt_qplib_add_flush_qp(qp);
2595 		}
2596 	}
2597 done:
2598 	return rc;
2599 }
2600 
2601 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2602 {
2603 	struct cq_base *hw_cqe;
2604 	u32 sw_cons, raw_cons;
2605 	bool rc = true;
2606 
2607 	raw_cons = cq->hwq.cons;
2608 	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2609 	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
2610 	 /* Check for Valid bit. If the CQE is valid, return false */
2611 	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2612 	return rc;
2613 }
2614 
2615 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2616 						struct cq_res_raweth_qp1 *hwcqe,
2617 						struct bnxt_qplib_cqe **pcqe,
2618 						int *budget)
2619 {
2620 	struct bnxt_qplib_qp *qp;
2621 	struct bnxt_qplib_q *rq;
2622 	struct bnxt_qplib_srq *srq;
2623 	struct bnxt_qplib_cqe *cqe;
2624 	u32 wr_id_idx;
2625 	int rc = 0;
2626 
2627 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2628 				      le64_to_cpu(hwcqe->qp_handle));
2629 	if (!qp) {
2630 		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2631 		return -EINVAL;
2632 	}
2633 	if (qp->rq.flushed) {
2634 		dev_dbg(&cq->hwq.pdev->dev,
2635 			"%s: QP in Flush QP = %p\n", __func__, qp);
2636 		goto done;
2637 	}
2638 	cqe = *pcqe;
2639 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2640 	cqe->flags = le16_to_cpu(hwcqe->flags);
2641 	cqe->qp_handle = (u64)(unsigned long)qp;
2642 
2643 	wr_id_idx =
2644 		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2645 				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2646 	cqe->src_qp = qp->id;
2647 	if (qp->id == 1 && !cqe->length) {
2648 		/* Add workaround for the length misdetection */
2649 		cqe->length = 296;
2650 	} else {
2651 		cqe->length = le16_to_cpu(hwcqe->length);
2652 	}
2653 	cqe->pkey_index = qp->pkey_index;
2654 	memcpy(cqe->smac, qp->smac, 6);
2655 
2656 	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2657 	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2658 	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2659 
2660 	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2661 		srq = qp->srq;
2662 		if (!srq) {
2663 			dev_err(&cq->hwq.pdev->dev,
2664 				"FP: SRQ used but not defined??\n");
2665 			return -EINVAL;
2666 		}
2667 		if (wr_id_idx >= srq->hwq.max_elements) {
2668 			dev_err(&cq->hwq.pdev->dev,
2669 				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2670 				wr_id_idx, srq->hwq.max_elements);
2671 			return -EINVAL;
2672 		}
2673 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2674 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2675 		cqe++;
2676 		(*budget)--;
2677 		*pcqe = cqe;
2678 	} else {
2679 		struct bnxt_qplib_swq *swq;
2680 
2681 		rq = &qp->rq;
2682 		if (wr_id_idx > (rq->max_wqe - 1)) {
2683 			dev_err(&cq->hwq.pdev->dev,
2684 				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2685 				wr_id_idx, rq->max_wqe);
2686 			return -EINVAL;
2687 		}
2688 		if (rq->swq_last != wr_id_idx)
2689 			return -EINVAL;
2690 		swq = &rq->swq[rq->swq_last];
2691 		cqe->wr_id = swq->wr_id;
2692 		cqe++;
2693 		(*budget)--;
2694 		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2695 		rq->swq_last = swq->next_idx;
2696 		*pcqe = cqe;
2697 
2698 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2699 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2700 			/* Add qp to flush list of the CQ */
2701 			bnxt_qplib_add_flush_qp(qp);
2702 		}
2703 	}
2704 
2705 done:
2706 	return rc;
2707 }
2708 
2709 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2710 					  struct cq_terminal *hwcqe,
2711 					  struct bnxt_qplib_cqe **pcqe,
2712 					  int *budget)
2713 {
2714 	struct bnxt_qplib_qp *qp;
2715 	struct bnxt_qplib_q *sq, *rq;
2716 	struct bnxt_qplib_cqe *cqe;
2717 	u32 swq_last = 0, cqe_cons;
2718 	int rc = 0;
2719 
2720 	/* Check the Status */
2721 	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2722 		dev_warn(&cq->hwq.pdev->dev,
2723 			 "FP: CQ Process Terminal Error status = 0x%x\n",
2724 			 hwcqe->status);
2725 
2726 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2727 				      le64_to_cpu(hwcqe->qp_handle));
2728 	if (!qp) {
2729 		dev_err(&cq->hwq.pdev->dev,
2730 			"FP: CQ Process terminal qp is NULL\n");
2731 		return -EINVAL;
2732 	}
2733 
2734 	/* Must block new posting of SQ and RQ */
2735 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2736 
2737 	sq = &qp->sq;
2738 	rq = &qp->rq;
2739 
2740 	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2741 	if (cqe_cons == 0xFFFF)
2742 		goto do_rq;
2743 	cqe_cons %= sq->max_wqe;
2744 
2745 	if (qp->sq.flushed) {
2746 		dev_dbg(&cq->hwq.pdev->dev,
2747 			"%s: QP in Flush QP = %p\n", __func__, qp);
2748 		goto sq_done;
2749 	}
2750 
2751 	/* Terminal CQE can also include aggregated successful CQEs prior.
2752 	 * So we must complete all CQEs from the current sq's cons to the
2753 	 * cq_cons with status OK
2754 	 */
2755 	cqe = *pcqe;
2756 	while (*budget) {
2757 		swq_last = sq->swq_last;
2758 		if (swq_last == cqe_cons)
2759 			break;
2760 		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2761 			memset(cqe, 0, sizeof(*cqe));
2762 			cqe->status = CQ_REQ_STATUS_OK;
2763 			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2764 			cqe->qp_handle = (u64)(unsigned long)qp;
2765 			cqe->src_qp = qp->id;
2766 			cqe->wr_id = sq->swq[swq_last].wr_id;
2767 			cqe->type = sq->swq[swq_last].type;
2768 			cqe++;
2769 			(*budget)--;
2770 		}
2771 		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
2772 		sq->swq_last = sq->swq[swq_last].next_idx;
2773 	}
2774 	*pcqe = cqe;
2775 	if (!(*budget) && swq_last != cqe_cons) {
2776 		/* Out of budget */
2777 		rc = -EAGAIN;
2778 		goto sq_done;
2779 	}
2780 sq_done:
2781 	if (rc)
2782 		return rc;
2783 do_rq:
2784 	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2785 	if (cqe_cons == 0xFFFF) {
2786 		goto done;
2787 	} else if (cqe_cons > rq->max_wqe - 1) {
2788 		dev_err(&cq->hwq.pdev->dev,
2789 			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2790 			cqe_cons, rq->max_wqe);
2791 		rc = -EINVAL;
2792 		goto done;
2793 	}
2794 
2795 	if (qp->rq.flushed) {
2796 		dev_dbg(&cq->hwq.pdev->dev,
2797 			"%s: QP in Flush QP = %p\n", __func__, qp);
2798 		rc = 0;
2799 		goto done;
2800 	}
2801 
2802 	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2803 	 * from the current rq->cons to the rq->prod regardless what the
2804 	 * rq->cons the terminal CQE indicates
2805 	 */
2806 
2807 	/* Add qp to flush list of the CQ */
2808 	bnxt_qplib_add_flush_qp(qp);
2809 done:
2810 	return rc;
2811 }
2812 
2813 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2814 					struct cq_cutoff *hwcqe)
2815 {
2816 	/* Check the Status */
2817 	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2818 		dev_err(&cq->hwq.pdev->dev,
2819 			"FP: CQ Process Cutoff Error status = 0x%x\n",
2820 			hwcqe->status);
2821 		return -EINVAL;
2822 	}
2823 	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2824 	wake_up_interruptible(&cq->waitq);
2825 
2826 	return 0;
2827 }
2828 
2829 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2830 				  struct bnxt_qplib_cqe *cqe,
2831 				  int num_cqes)
2832 {
2833 	struct bnxt_qplib_qp *qp = NULL;
2834 	u32 budget = num_cqes;
2835 	unsigned long flags;
2836 
2837 	spin_lock_irqsave(&cq->flush_lock, flags);
2838 	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2839 		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2840 		__flush_sq(&qp->sq, qp, &cqe, &budget);
2841 	}
2842 
2843 	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2844 		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2845 		__flush_rq(&qp->rq, qp, &cqe, &budget);
2846 	}
2847 	spin_unlock_irqrestore(&cq->flush_lock, flags);
2848 
2849 	return num_cqes - budget;
2850 }
2851 
2852 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2853 		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
2854 {
2855 	struct cq_base *hw_cqe;
2856 	u32 sw_cons, raw_cons;
2857 	int budget, rc = 0;
2858 	u8 type;
2859 
2860 	raw_cons = cq->hwq.cons;
2861 	budget = num_cqes;
2862 
2863 	while (budget) {
2864 		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2865 		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
2866 
2867 		/* Check for Valid bit */
2868 		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2869 			break;
2870 
2871 		/*
2872 		 * The valid test of the entry must be done first before
2873 		 * reading any further.
2874 		 */
2875 		dma_rmb();
2876 		/* From the device's respective CQE format to qplib_wc*/
2877 		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2878 		switch (type) {
2879 		case CQ_BASE_CQE_TYPE_REQ:
2880 			rc = bnxt_qplib_cq_process_req(cq,
2881 						       (struct cq_req *)hw_cqe,
2882 						       &cqe, &budget,
2883 						       sw_cons, lib_qp);
2884 			break;
2885 		case CQ_BASE_CQE_TYPE_RES_RC:
2886 			rc = bnxt_qplib_cq_process_res_rc(cq,
2887 							  (struct cq_res_rc *)
2888 							  hw_cqe, &cqe,
2889 							  &budget);
2890 			break;
2891 		case CQ_BASE_CQE_TYPE_RES_UD:
2892 			rc = bnxt_qplib_cq_process_res_ud
2893 					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
2894 					 &budget);
2895 			break;
2896 		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2897 			rc = bnxt_qplib_cq_process_res_raweth_qp1
2898 					(cq, (struct cq_res_raweth_qp1 *)
2899 					 hw_cqe, &cqe, &budget);
2900 			break;
2901 		case CQ_BASE_CQE_TYPE_TERMINAL:
2902 			rc = bnxt_qplib_cq_process_terminal
2903 					(cq, (struct cq_terminal *)hw_cqe,
2904 					 &cqe, &budget);
2905 			break;
2906 		case CQ_BASE_CQE_TYPE_CUT_OFF:
2907 			bnxt_qplib_cq_process_cutoff
2908 					(cq, (struct cq_cutoff *)hw_cqe);
2909 			/* Done processing this CQ */
2910 			goto exit;
2911 		default:
2912 			dev_err(&cq->hwq.pdev->dev,
2913 				"process_cq unknown type 0x%lx\n",
2914 				hw_cqe->cqe_type_toggle &
2915 				CQ_BASE_CQE_TYPE_MASK);
2916 			rc = -EINVAL;
2917 			break;
2918 		}
2919 		if (rc < 0) {
2920 			if (rc == -EAGAIN)
2921 				break;
2922 			/* Error while processing the CQE, just skip to the
2923 			 * next one
2924 			 */
2925 			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
2926 				dev_err(&cq->hwq.pdev->dev,
2927 					"process_cqe error rc = 0x%x\n", rc);
2928 		}
2929 		raw_cons++;
2930 	}
2931 	if (cq->hwq.cons != raw_cons) {
2932 		cq->hwq.cons = raw_cons;
2933 		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
2934 	}
2935 exit:
2936 	return num_cqes - budget;
2937 }
2938 
2939 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2940 {
2941 	if (arm_type)
2942 		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
2943 	/* Using cq->arm_state variable to track whether to issue cq handler */
2944 	atomic_set(&cq->arm_state, 1);
2945 }
2946 
2947 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
2948 {
2949 	flush_workqueue(qp->scq->nq->cqn_wq);
2950 	if (qp->scq != qp->rcq)
2951 		flush_workqueue(qp->rcq->nq->cqn_wq);
2952 }
2953