xref: /openbmc/linux/drivers/infiniband/hw/bnxt_re/qplib_fp.c (revision aad29a73199b7fbccfbabea3f1ee627ad1924f52)
1  /*
2   * Broadcom NetXtreme-E RoCE driver.
3   *
4   * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5   * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6   *
7   * This software is available to you under a choice of one of two
8   * licenses.  You may choose to be licensed under the terms of the GNU
9   * General Public License (GPL) Version 2, available from the file
10   * COPYING in the main directory of this source tree, or the
11   * BSD license below:
12   *
13   * Redistribution and use in source and binary forms, with or without
14   * modification, are permitted provided that the following conditions
15   * are met:
16   *
17   * 1. Redistributions of source code must retain the above copyright
18   *    notice, this list of conditions and the following disclaimer.
19   * 2. Redistributions in binary form must reproduce the above copyright
20   *    notice, this list of conditions and the following disclaimer in
21   *    the documentation and/or other materials provided with the
22   *    distribution.
23   *
24   * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25   * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26   * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27   * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28   * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29   * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30   * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31   * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32   * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33   * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34   * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35   *
36   * Description: Fast Path Operators
37   */
38  
39  #define dev_fmt(fmt) "QPLIB: " fmt
40  
41  #include <linux/interrupt.h>
42  #include <linux/spinlock.h>
43  #include <linux/sched.h>
44  #include <linux/slab.h>
45  #include <linux/pci.h>
46  #include <linux/delay.h>
47  #include <linux/prefetch.h>
48  #include <linux/if_ether.h>
49  #include <rdma/ib_mad.h>
50  
51  #include "roce_hsi.h"
52  
53  #include "qplib_res.h"
54  #include "qplib_rcfw.h"
55  #include "qplib_sp.h"
56  #include "qplib_fp.h"
57  
58  static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
59  
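/* Reset the SQ state used to track phantom-completion processing so any
 * in-progress phantom WQE handling is abandoned before the QP is flushed.
 */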
60  static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
61  {
62  	qp->sq.condition = false;
63  	qp->sq.send_phantom = false;
64  	qp->sq.single = false;
65  }
66  
67  /* Flush list */
68  static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
69  {
70  	struct bnxt_qplib_cq *scq, *rcq;
71  
72  	scq = qp->scq;
73  	rcq = qp->rcq;
74  
75  	if (!qp->sq.flushed) {
76  		dev_dbg(&scq->hwq.pdev->dev,
77  			"FP: Adding to SQ Flush list = %p\n", qp);
78  		bnxt_qplib_cancel_phantom_processing(qp);
79  		list_add_tail(&qp->sq_flush, &scq->sqf_head);
80  		qp->sq.flushed = true;
81  	}
82  	if (!qp->srq) {
83  		if (!qp->rq.flushed) {
84  			dev_dbg(&rcq->hwq.pdev->dev,
85  				"FP: Adding to RQ Flush list = %p\n", qp);
86  			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
87  			qp->rq.flushed = true;
88  		}
89  	}
90  }
91  
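/* Take both CQ flush locks in a fixed order: the send CQ lock is taken
 * first with interrupts saved, and the receive CQ lock is nested only when
 * the two CQs are distinct, so a QP whose SQ and RQ share a CQ is never
 * double-locked.
 */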
92  static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
93  				       unsigned long *flags)
94  	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
95  {
96  	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
97  	if (qp->scq == qp->rcq)
98  		__acquire(&qp->rcq->flush_lock);
99  	else
100  		spin_lock(&qp->rcq->flush_lock);
101  }
102  
103  static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
104  				       unsigned long *flags)
105  	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
106  {
107  	if (qp->scq == qp->rcq)
108  		__release(&qp->rcq->flush_lock);
109  	else
110  		spin_unlock(&qp->rcq->flush_lock);
111  	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
112  }
113  
114  void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
115  {
116  	unsigned long flags;
117  
118  	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
119  	__bnxt_qplib_add_flush_qp(qp);
120  	bnxt_qplib_release_cq_flush_locks(qp, &flags);
121  }
122  
123  static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
124  {
125  	if (qp->sq.flushed) {
126  		qp->sq.flushed = false;
127  		list_del(&qp->sq_flush);
128  	}
129  	if (!qp->srq) {
130  		if (qp->rq.flushed) {
131  			qp->rq.flushed = false;
132  			list_del(&qp->rq_flush);
133  		}
134  	}
135  }
136  
137  void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
138  {
139  	unsigned long flags;
140  
141  	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
142  	__clean_cq(qp->scq, (u64)(unsigned long)qp);
143  	qp->sq.hwq.prod = 0;
144  	qp->sq.hwq.cons = 0;
145  	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
146  	qp->rq.hwq.prod = 0;
147  	qp->rq.hwq.cons = 0;
148  
149  	__bnxt_qplib_del_flush_qp(qp);
150  	bnxt_qplib_release_cq_flush_locks(qp, &flags);
151  }
152  
153  static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
154  {
155  	struct bnxt_qplib_nq_work *nq_work =
156  			container_of(work, struct bnxt_qplib_nq_work, work);
157  
158  	struct bnxt_qplib_cq *cq = nq_work->cq;
159  	struct bnxt_qplib_nq *nq = nq_work->nq;
160  
161  	if (cq && nq) {
162  		spin_lock_bh(&cq->compl_lock);
163  		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
164  			dev_dbg(&nq->pdev->dev,
165  				"%s:Trigger cq  = %p event nq = %p\n",
166  				__func__, cq, nq);
167  			nq->cqn_handler(nq, cq);
168  		}
169  		spin_unlock_bh(&cq->compl_lock);
170  	}
171  	kfree(nq_work);
172  }
173  
174  static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
175  				       struct bnxt_qplib_qp *qp)
176  {
177  	struct bnxt_qplib_q *rq = &qp->rq;
178  	struct bnxt_qplib_q *sq = &qp->sq;
179  
180  	if (qp->rq_hdr_buf)
181  		dma_free_coherent(&res->pdev->dev,
182  				  rq->max_wqe * qp->rq_hdr_buf_size,
183  				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
184  	if (qp->sq_hdr_buf)
185  		dma_free_coherent(&res->pdev->dev,
186  				  sq->max_wqe * qp->sq_hdr_buf_size,
187  				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
188  	qp->rq_hdr_buf = NULL;
189  	qp->sq_hdr_buf = NULL;
190  	qp->rq_hdr_buf_map = 0;
191  	qp->sq_hdr_buf_map = 0;
192  	qp->sq_hdr_buf_size = 0;
193  	qp->rq_hdr_buf_size = 0;
194  }
195  
196  static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
197  				       struct bnxt_qplib_qp *qp)
198  {
199  	struct bnxt_qplib_q *rq = &qp->rq;
200  	struct bnxt_qplib_q *sq = &qp->sq;
201  	int rc = 0;
202  
203  	if (qp->sq_hdr_buf_size && sq->max_wqe) {
204  		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
205  					sq->max_wqe * qp->sq_hdr_buf_size,
206  					&qp->sq_hdr_buf_map, GFP_KERNEL);
207  		if (!qp->sq_hdr_buf) {
208  			rc = -ENOMEM;
209  			dev_err(&res->pdev->dev,
210  				"Failed to create sq_hdr_buf\n");
211  			goto fail;
212  		}
213  	}
214  
215  	if (qp->rq_hdr_buf_size && rq->max_wqe) {
216  		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
217  						    rq->max_wqe *
218  						    qp->rq_hdr_buf_size,
219  						    &qp->rq_hdr_buf_map,
220  						    GFP_KERNEL);
221  		if (!qp->rq_hdr_buf) {
222  			rc = -ENOMEM;
223  			dev_err(&res->pdev->dev,
224  				"Failed to create rq_hdr_buf\n");
225  			goto fail;
226  		}
227  	}
228  	return 0;
229  
230  fail:
231  	bnxt_qplib_free_qp_hdr_buf(res, qp);
232  	return rc;
233  }
234  
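/* Walk up to nq->budget notification entries and, for CQ notification
 * entries that still reference the CQ being cleaned, clear the stored CQ
 * handle and count the event, so the NQ no longer points at the CQ.
 */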
235  static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
236  {
237  	struct bnxt_qplib_hwq *hwq = &nq->hwq;
238  	struct nq_base *nqe, **nq_ptr;
239  	int budget = nq->budget;
240  	uintptr_t q_handle;
241  	u16 type;
242  
243  	spin_lock_bh(&hwq->lock);
244  	/* Service the NQ until empty */
245  	while (budget--) {
246  		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
247  		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
248  		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
249  			break;
250  
251  		/*
252  		 * The valid bit of the entry must be checked before
253  		 * reading any further.
254  		 */
255  		dma_rmb();
256  
257  		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
258  		switch (type) {
259  		case NQ_BASE_TYPE_CQ_NOTIFICATION:
260  		{
261  			struct nq_cn *nqcne = (struct nq_cn *)nqe;
262  
263  			q_handle = le32_to_cpu(nqcne->cq_handle_low);
264  			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
265  						     << 32;
266  			if ((unsigned long)cq == q_handle) {
267  				nqcne->cq_handle_low = 0;
268  				nqcne->cq_handle_high = 0;
269  				cq->cnq_events++;
270  			}
271  			break;
272  		}
273  		default:
274  			break;
275  		}
276  		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
277  					 1, &nq->nq_db.dbinfo.flags);
278  	}
279  	spin_unlock_bh(&hwq->lock);
280  }
281  
282  /* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
283   * this CQ.
284   */
285  static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
286  {
287  	u32 retry_cnt = 100;
288  
289  	while (retry_cnt--) {
290  		if (cnq_events == cq->cnq_events)
291  			return;
292  		usleep_range(50, 100);
293  		clean_nq(cq->nq, cq);
294  	}
295  }
296  
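/* Tasklet handler for the notification queue: consume up to nq->budget
 * valid entries, dispatch CQ notifications and SRQ events to the
 * registered handlers, and re-arm the NQ doorbell if anything was polled.
 */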
297  static void bnxt_qplib_service_nq(struct tasklet_struct *t)
298  {
299  	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
300  	struct bnxt_qplib_hwq *hwq = &nq->hwq;
301  	struct bnxt_qplib_cq *cq;
302  	int budget = nq->budget;
303  	struct nq_base *nqe;
304  	uintptr_t q_handle;
305  	u32 hw_polled = 0;
306  	u16 type;
307  
308  	spin_lock_bh(&hwq->lock);
309  	/* Service the NQ until empty */
310  	while (budget--) {
311  		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
312  		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
313  			break;
314  
315  		/*
316  		 * The valid bit of the entry must be checked before
317  		 * reading any further.
318  		 */
319  		dma_rmb();
320  
321  		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
322  		switch (type) {
323  		case NQ_BASE_TYPE_CQ_NOTIFICATION:
324  		{
325  			struct nq_cn *nqcne = (struct nq_cn *)nqe;
326  
327  			q_handle = le32_to_cpu(nqcne->cq_handle_low);
328  			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
329  						     << 32;
330  			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
331  			if (!cq)
332  				break;
333  			bnxt_qplib_armen_db(&cq->dbinfo,
334  					    DBC_DBC_TYPE_CQ_ARMENA);
335  			spin_lock_bh(&cq->compl_lock);
336  			atomic_set(&cq->arm_state, 0);
337  			if (nq->cqn_handler(nq, (cq)))
338  				dev_warn(&nq->pdev->dev,
339  					 "cqn - type 0x%x not handled\n", type);
340  			cq->cnq_events++;
341  			spin_unlock_bh(&cq->compl_lock);
342  			break;
343  		}
344  		case NQ_BASE_TYPE_SRQ_EVENT:
345  		{
346  			struct bnxt_qplib_srq *srq;
347  			struct nq_srq_event *nqsrqe =
348  						(struct nq_srq_event *)nqe;
349  
350  			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
351  			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
352  				     << 32;
353  			srq = (struct bnxt_qplib_srq *)q_handle;
354  			bnxt_qplib_armen_db(&srq->dbinfo,
355  					    DBC_DBC_TYPE_SRQ_ARMENA);
356  			if (nq->srqn_handler(nq,
357  					     (struct bnxt_qplib_srq *)q_handle,
358  					     nqsrqe->event))
359  				dev_warn(&nq->pdev->dev,
360  					 "SRQ event 0x%x not handled\n",
361  					 nqsrqe->event);
362  			break;
363  		}
364  		case NQ_BASE_TYPE_DBQ_EVENT:
365  			break;
366  		default:
367  			dev_warn(&nq->pdev->dev,
368  				 "nqe with type = 0x%x not handled\n", type);
369  			break;
370  		}
371  		hw_polled++;
372  		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
373  					 1, &nq->nq_db.dbinfo.flags);
374  	}
375  	if (hw_polled)
376  		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
377  	spin_unlock_bh(&hwq->lock);
378  }
379  
380  /* bnxt_re_synchronize_nq - self-poll the notification queue.
381   * @nq      -     notification queue pointer
382   *
383   * This function polls a given notification queue
384   * for all pending entries.
385   * This function is useful to synchronize notification entries while resources
386   * are going away.
387   */
388  
389  void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
390  {
391  	int budget = nq->budget;
392  
393  	nq->budget = nq->hwq.max_elements;
394  	bnxt_qplib_service_nq(&nq->nq_tasklet);
395  	nq->budget = budget;
396  }
397  
398  static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
399  {
400  	struct bnxt_qplib_nq *nq = dev_instance;
401  	struct bnxt_qplib_hwq *hwq = &nq->hwq;
402  	u32 sw_cons;
403  
404  	/* Prefetch the NQ element */
405  	sw_cons = HWQ_CMP(hwq->cons, hwq);
406  	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
407  
408  	/* Fan out to CPU affinitized kthreads? */
409  	tasklet_schedule(&nq->nq_tasklet);
410  
411  	return IRQ_HANDLED;
412  }
413  
414  void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
415  {
416  	if (!nq->requested)
417  		return;
418  
419  	nq->requested = false;
420  	/* Mask h/w interrupt */
421  	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
422  	/* Sync with last running IRQ handler */
423  	synchronize_irq(nq->msix_vec);
424  	irq_set_affinity_hint(nq->msix_vec, NULL);
425  	free_irq(nq->msix_vec, nq);
426  	kfree(nq->name);
427  	nq->name = NULL;
428  
429  	if (kill)
430  		tasklet_kill(&nq->nq_tasklet);
431  	tasklet_disable(&nq->nq_tasklet);
432  }
433  
434  void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
435  {
436  	if (nq->cqn_wq) {
437  		destroy_workqueue(nq->cqn_wq);
438  		nq->cqn_wq = NULL;
439  	}
440  
441  	/* Make sure the HW is stopped! */
442  	bnxt_qplib_nq_stop_irq(nq, true);
443  
444  	if (nq->nq_db.reg.bar_reg) {
445  		iounmap(nq->nq_db.reg.bar_reg);
446  		nq->nq_db.reg.bar_reg = NULL;
447  	}
448  
449  	nq->cqn_handler = NULL;
450  	nq->srqn_handler = NULL;
451  	nq->msix_vec = 0;
452  }
453  
454  int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
455  			    int msix_vector, bool need_init)
456  {
457  	struct bnxt_qplib_res *res = nq->res;
458  	int rc;
459  
460  	if (nq->requested)
461  		return -EFAULT;
462  
463  	nq->msix_vec = msix_vector;
464  	if (need_init)
465  		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
466  	else
467  		tasklet_enable(&nq->nq_tasklet);
468  
469  	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
470  			     nq_indx, pci_name(res->pdev));
471  	if (!nq->name)
472  		return -ENOMEM;
473  	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
474  	if (rc) {
475  		kfree(nq->name);
476  		nq->name = NULL;
477  		tasklet_disable(&nq->nq_tasklet);
478  		return rc;
479  	}
480  
481  	cpumask_clear(&nq->mask);
482  	cpumask_set_cpu(nq_indx, &nq->mask);
483  	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
484  	if (rc) {
485  		dev_warn(&nq->pdev->dev,
486  			 "set affinity failed; vector: %d nq_idx: %d\n",
487  			 nq->msix_vec, nq_indx);
488  	}
489  	nq->requested = true;
490  	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
491  
492  	return rc;
493  }
494  
495  static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq,  u32 reg_offt)
496  {
497  	resource_size_t reg_base;
498  	struct bnxt_qplib_nq_db *nq_db;
499  	struct pci_dev *pdev;
500  
501  	pdev = nq->pdev;
502  	nq_db = &nq->nq_db;
503  
504  	nq_db->dbinfo.flags = 0;
505  	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
506  	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
507  	if (!nq_db->reg.bar_base) {
508  		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
509  			nq_db->reg.bar_id);
510  		return -ENOMEM;
511  	}
512  
513  	reg_base = nq_db->reg.bar_base + reg_offt;
514  	/* Unconditionally map 8 bytes to support 57500 series */
515  	nq_db->reg.len = 8;
516  	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
517  	if (!nq_db->reg.bar_reg) {
518  		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
519  			nq_db->reg.bar_id);
520  		return -ENOMEM;
521  	}
522  
523  	nq_db->dbinfo.db = nq_db->reg.bar_reg;
524  	nq_db->dbinfo.hwq = &nq->hwq;
525  	nq_db->dbinfo.xid = nq->ring_id;
526  
527  	return 0;
528  }
529  
530  int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
531  			 int nq_idx, int msix_vector, int bar_reg_offset,
532  			 cqn_handler_t cqn_handler,
533  			 srqn_handler_t srqn_handler)
534  {
535  	int rc;
536  
537  	nq->pdev = pdev;
538  	nq->cqn_handler = cqn_handler;
539  	nq->srqn_handler = srqn_handler;
540  
541  	/* Use a workqueue task to schedule CQ notifiers in the post-send case */
542  	nq->cqn_wq  = create_singlethread_workqueue("bnxt_qplib_nq");
543  	if (!nq->cqn_wq)
544  		return -ENOMEM;
545  
546  	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
547  	if (rc)
548  		goto fail;
549  
550  	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
551  	if (rc) {
552  		dev_err(&nq->pdev->dev,
553  			"Failed to request irq for nq-idx %d\n", nq_idx);
554  		goto fail;
555  	}
556  
557  	return 0;
558  fail:
559  	bnxt_qplib_disable_nq(nq);
560  	return rc;
561  }
562  
563  void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
564  {
565  	if (nq->hwq.max_elements) {
566  		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
567  		nq->hwq.max_elements = 0;
568  	}
569  }
570  
571  int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
572  {
573  	struct bnxt_qplib_hwq_attr hwq_attr = {};
574  	struct bnxt_qplib_sg_info sginfo = {};
575  
576  	nq->pdev = res->pdev;
577  	nq->res = res;
578  	if (!nq->hwq.max_elements ||
579  	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
580  		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
581  
582  	sginfo.pgsize = PAGE_SIZE;
583  	sginfo.pgshft = PAGE_SHIFT;
584  	hwq_attr.res = res;
585  	hwq_attr.sginfo = &sginfo;
586  	hwq_attr.depth = nq->hwq.max_elements;
587  	hwq_attr.stride = sizeof(struct nq_base);
588  	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
589  	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
590  		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
591  		return -ENOMEM;
592  	}
593  	nq->budget = 8;
594  	return 0;
595  }
596  
597  /* SRQ */
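/* Destroy the SRQ in firmware; the software queue is freed regardless of
 * the command result, but the hardware queue memory is released only when
 * the DESTROY_SRQ command succeeds.
 */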
598  void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
599  			   struct bnxt_qplib_srq *srq)
600  {
601  	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
602  	struct creq_destroy_srq_resp resp = {};
603  	struct bnxt_qplib_cmdqmsg msg = {};
604  	struct cmdq_destroy_srq req = {};
605  	int rc;
606  
607  	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
608  				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
609  				 sizeof(req));
610  
611  	/* Configure the request */
612  	req.srq_cid = cpu_to_le32(srq->id);
613  
614  	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
615  	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
616  	kfree(srq->swq);
617  	if (rc)
618  		return;
619  	bnxt_qplib_free_hwq(res, &srq->hwq);
620  }
621  
622  int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
623  			  struct bnxt_qplib_srq *srq)
624  {
625  	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
626  	struct bnxt_qplib_hwq_attr hwq_attr = {};
627  	struct creq_create_srq_resp resp = {};
628  	struct bnxt_qplib_cmdqmsg msg = {};
629  	struct cmdq_create_srq req = {};
630  	struct bnxt_qplib_pbl *pbl;
631  	u16 pg_sz_lvl;
632  	int rc, idx;
633  
634  	hwq_attr.res = res;
635  	hwq_attr.sginfo = &srq->sg_info;
636  	hwq_attr.depth = srq->max_wqe;
637  	hwq_attr.stride = srq->wqe_size;
638  	hwq_attr.type = HWQ_TYPE_QUEUE;
639  	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
640  	if (rc)
641  		return rc;
642  	srq->dbinfo.flags = 0;
643  	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
644  				 CMDQ_BASE_OPCODE_CREATE_SRQ,
645  				 sizeof(req));
646  
647  	/* Configure the request */
648  	req.dpi = cpu_to_le32(srq->dpi->dpi);
649  	req.srq_handle = cpu_to_le64((uintptr_t)srq);
650  
651  	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
652  	pbl = &srq->hwq.pbl[PBL_LVL_0];
653  	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
654  		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
655  	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
656  		      CMDQ_CREATE_SRQ_LVL_SFT;
657  	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
658  	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
659  	req.pd_id = cpu_to_le32(srq->pd->id);
660  	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
661  
662  	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
663  	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
664  	if (rc)
665  		goto fail;
666  
667  	spin_lock_init(&srq->lock);
668  	srq->start_idx = 0;
669  	srq->last_idx = srq->hwq.max_elements - 1;
670  	if (!srq->hwq.is_user) {
671  		srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
672  				   GFP_KERNEL);
673  		if (!srq->swq) {
674  			rc = -ENOMEM;
675  			goto fail;
676  		}
677  		for (idx = 0; idx < srq->hwq.max_elements; idx++)
678  			srq->swq[idx].next_idx = idx + 1;
679  		srq->swq[srq->last_idx].next_idx = -1;
680  	}
681  
682  	srq->id = le32_to_cpu(resp.xid);
683  	srq->dbinfo.hwq = &srq->hwq;
684  	srq->dbinfo.xid = srq->id;
685  	srq->dbinfo.db = srq->dpi->dbr;
686  	srq->dbinfo.max_slot = 1;
687  	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
688  	if (srq->threshold)
689  		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
690  	srq->arm_req = false;
691  
692  	return 0;
693  fail:
694  	bnxt_qplib_free_hwq(res, &srq->hwq);
695  	kfree(srq->swq);
696  
697  	return rc;
698  }
699  
700  int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
701  			  struct bnxt_qplib_srq *srq)
702  {
703  	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
704  	u32 count;
705  
706  	count = __bnxt_qplib_get_avail(srq_hwq);
707  	if (count > srq->threshold) {
708  		srq->arm_req = false;
709  		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
710  	} else {
711  		/* Deferred arming */
712  		srq->arm_req = true;
713  	}
714  
715  	return 0;
716  }
717  
718  int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
719  			 struct bnxt_qplib_srq *srq)
720  {
721  	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
722  	struct creq_query_srq_resp resp = {};
723  	struct bnxt_qplib_cmdqmsg msg = {};
724  	struct bnxt_qplib_rcfw_sbuf sbuf;
725  	struct creq_query_srq_resp_sb *sb;
726  	struct cmdq_query_srq req = {};
727  	int rc;
728  
729  	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
730  				 CMDQ_BASE_OPCODE_QUERY_SRQ,
731  				 sizeof(req));
732  
733  	/* Configure the request */
734  	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
735  	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
736  				     &sbuf.dma_addr, GFP_KERNEL);
737  	if (!sbuf.sb)
738  		return -ENOMEM;
739  	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
740  	req.srq_cid = cpu_to_le32(srq->id);
741  	sb = sbuf.sb;
742  	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
743  				sizeof(resp), 0);
744  	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
745  	if (!rc)
746  		srq->threshold = le16_to_cpu(sb->srq_limit);
747  	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
748  			  sbuf.sb, sbuf.dma_addr);
749  
750  	return rc;
751  }
752  
753  int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
754  			     struct bnxt_qplib_swqe *wqe)
755  {
756  	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
757  	struct rq_wqe *srqe;
758  	struct sq_sge *hw_sge;
759  	u32 count = 0;
760  	int i, next;
761  
762  	spin_lock(&srq_hwq->lock);
763  	if (srq->start_idx == srq->last_idx) {
764  		dev_err(&srq_hwq->pdev->dev,
765  			"FP: SRQ (0x%x) is full!\n", srq->id);
766  		spin_unlock(&srq_hwq->lock);
767  		return -EINVAL;
768  	}
769  	next = srq->start_idx;
770  	srq->start_idx = srq->swq[next].next_idx;
771  	spin_unlock(&srq_hwq->lock);
772  
773  	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
774  	memset(srqe, 0, srq->wqe_size);
775  	/* Fill the SGEs from the WQE's SG list */
776  	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
777  	     i < wqe->num_sge; i++, hw_sge++) {
778  		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
779  		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
780  		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
781  	}
782  	srqe->wqe_type = wqe->type;
783  	srqe->flags = wqe->flags;
784  	srqe->wqe_size = wqe->num_sge +
785  			((offsetof(typeof(*srqe), data) + 15) >> 4);
786  	srqe->wr_id[0] = cpu_to_le32((u32)next);
787  	srq->swq[next].wr_id = wqe->wr_id;
788  
789  	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
790  
791  	spin_lock(&srq_hwq->lock);
792  	count = __bnxt_qplib_get_avail(srq_hwq);
793  	spin_unlock(&srq_hwq->lock);
794  	/* Ring DB */
795  	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
796  	if (srq->arm_req == true && count > srq->threshold) {
797  		srq->arm_req = false;
798  		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
799  	}
800  
801  	return 0;
802  }
803  
804  /* QP */
805  
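/* Allocate the software queue shadow array and link its entries into a
 * circular free list via next_idx, with swq_start/swq_last tracking the
 * next slot to use.
 */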
806  static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
807  {
808  	int indx;
809  
810  	que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
811  	if (!que->swq)
812  		return -ENOMEM;
813  
814  	que->swq_start = 0;
815  	que->swq_last = que->max_sw_wqe - 1;
816  	for (indx = 0; indx < que->max_sw_wqe; indx++)
817  		que->swq[indx].next_idx = indx + 1;
818  	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
819  	que->swq_last = 0;
820  
821  	return 0;
822  }
823  
824  int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
825  {
826  	struct bnxt_qplib_hwq_attr hwq_attr = {};
827  	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
828  	struct creq_create_qp1_resp resp = {};
829  	struct bnxt_qplib_cmdqmsg msg = {};
830  	struct bnxt_qplib_q *sq = &qp->sq;
831  	struct bnxt_qplib_q *rq = &qp->rq;
832  	struct cmdq_create_qp1 req = {};
833  	struct bnxt_qplib_pbl *pbl;
834  	u32 qp_flags = 0;
835  	u8 pg_sz_lvl;
836  	u32 tbl_indx;
837  	int rc;
838  
839  	sq->dbinfo.flags = 0;
840  	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
841  				 CMDQ_BASE_OPCODE_CREATE_QP1,
842  				 sizeof(req));
843  	/* General */
844  	req.type = qp->type;
845  	req.dpi = cpu_to_le32(qp->dpi->dpi);
846  	req.qp_handle = cpu_to_le64(qp->qp_handle);
847  
848  	/* SQ */
849  	hwq_attr.res = res;
850  	hwq_attr.sginfo = &sq->sg_info;
851  	hwq_attr.stride = sizeof(struct sq_sge);
852  	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
853  	hwq_attr.type = HWQ_TYPE_QUEUE;
854  	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
855  	if (rc)
856  		return rc;
857  
858  	rc = bnxt_qplib_alloc_init_swq(sq);
859  	if (rc)
860  		goto fail_sq;
861  
862  	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
863  	pbl = &sq->hwq.pbl[PBL_LVL_0];
864  	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
865  	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
866  		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
867  	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
868  	req.sq_pg_size_sq_lvl = pg_sz_lvl;
869  	req.sq_fwo_sq_sge =
870  		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
871  			     CMDQ_CREATE_QP1_SQ_SGE_SFT);
872  	req.scq_cid = cpu_to_le32(qp->scq->id);
873  
874  	/* RQ */
875  	if (rq->max_wqe) {
876  		rq->dbinfo.flags = 0;
877  		hwq_attr.res = res;
878  		hwq_attr.sginfo = &rq->sg_info;
879  		hwq_attr.stride = sizeof(struct sq_sge);
880  		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
881  		hwq_attr.type = HWQ_TYPE_QUEUE;
882  		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
883  		if (rc)
884  			goto sq_swq;
885  		rc = bnxt_qplib_alloc_init_swq(rq);
886  		if (rc)
887  			goto fail_rq;
888  		req.rq_size = cpu_to_le32(rq->max_wqe);
889  		pbl = &rq->hwq.pbl[PBL_LVL_0];
890  		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
891  		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
892  			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
893  		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
894  		req.rq_pg_size_rq_lvl = pg_sz_lvl;
895  		req.rq_fwo_rq_sge =
896  			cpu_to_le16((rq->max_sge &
897  				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
898  				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
899  	}
900  	req.rcq_cid = cpu_to_le32(qp->rcq->id);
901  	/* Header buffer - allow hdr_buf to be passed in */
902  	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
903  	if (rc) {
904  		rc = -ENOMEM;
905  		goto rq_rwq;
906  	}
907  	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
908  	req.qp_flags = cpu_to_le32(qp_flags);
909  	req.pd_id = cpu_to_le32(qp->pd->id);
910  
911  	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
912  	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
913  	if (rc)
914  		goto fail;
915  
916  	qp->id = le32_to_cpu(resp.xid);
917  	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
918  	qp->cctx = res->cctx;
919  	sq->dbinfo.hwq = &sq->hwq;
920  	sq->dbinfo.xid = qp->id;
921  	sq->dbinfo.db = qp->dpi->dbr;
922  	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
923  	if (rq->max_wqe) {
924  		rq->dbinfo.hwq = &rq->hwq;
925  		rq->dbinfo.xid = qp->id;
926  		rq->dbinfo.db = qp->dpi->dbr;
927  		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
928  	}
929  	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
930  	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
931  	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
932  
933  	return 0;
934  
935  fail:
936  	bnxt_qplib_free_qp_hdr_buf(res, qp);
937  rq_rwq:
938  	kfree(rq->swq);
939  fail_rq:
940  	bnxt_qplib_free_hwq(res, &rq->hwq);
941  sq_swq:
942  	kfree(sq->swq);
943  fail_sq:
944  	bnxt_qplib_free_hwq(res, &sq->hwq);
945  	return rc;
946  }
947  
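/* The PSN/MSN search area is laid out after the SQ WQE area; record the
 * page, the index of the first entry within that page and the entry
 * stride so the search entries can be located when posting sends.
 */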
948  static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
949  {
950  	struct bnxt_qplib_hwq *hwq;
951  	struct bnxt_qplib_q *sq;
952  	u64 fpsne, psn_pg;
953  	u16 indx_pad = 0;
954  
955  	sq = &qp->sq;
956  	hwq = &sq->hwq;
957  	/* First psn entry */
958  	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
959  	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
960  		indx_pad = (fpsne & ~PAGE_MASK) / size;
961  	hwq->pad_pgofft = indx_pad;
962  	hwq->pad_pg = (u64 *)psn_pg;
963  	hwq->pad_stride = size;
964  }
965  
966  int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
967  {
968  	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
969  	struct bnxt_qplib_hwq_attr hwq_attr = {};
970  	struct bnxt_qplib_sg_info sginfo = {};
971  	struct creq_create_qp_resp resp = {};
972  	struct bnxt_qplib_cmdqmsg msg = {};
973  	struct bnxt_qplib_q *sq = &qp->sq;
974  	struct bnxt_qplib_q *rq = &qp->rq;
975  	struct cmdq_create_qp req = {};
976  	int rc, req_size, psn_sz = 0;
977  	struct bnxt_qplib_hwq *xrrq;
978  	struct bnxt_qplib_pbl *pbl;
979  	u32 qp_flags = 0;
980  	u8 pg_sz_lvl;
981  	u32 tbl_indx;
982  	u16 nsge;
983  
984  	qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
985  	sq->dbinfo.flags = 0;
986  	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
987  				 CMDQ_BASE_OPCODE_CREATE_QP,
988  				 sizeof(req));
989  
990  	/* General */
991  	req.type = qp->type;
992  	req.dpi = cpu_to_le32(qp->dpi->dpi);
993  	req.qp_handle = cpu_to_le64(qp->qp_handle);
994  
995  	/* SQ */
996  	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
997  		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
998  			 sizeof(struct sq_psn_search_ext) :
999  			 sizeof(struct sq_psn_search);
1000  
1001  		if (qp->is_host_msn_tbl) {
1002  			psn_sz = sizeof(struct sq_msn_search);
1003  			qp->msn = 0;
1004  		}
1005  	}
1006  
1007  	hwq_attr.res = res;
1008  	hwq_attr.sginfo = &sq->sg_info;
1009  	hwq_attr.stride = sizeof(struct sq_sge);
1010  	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
1011  	hwq_attr.aux_stride = psn_sz;
1012  	hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
1013  				    : 0;
1014  	/* Update msn tbl size */
1015  	if (qp->is_host_msn_tbl && psn_sz) {
1016  		if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1017  			hwq_attr.aux_depth =
1018  				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
1019  		else
1020  			hwq_attr.aux_depth =
1021  				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
1022  		qp->msn_tbl_sz = hwq_attr.aux_depth;
1023  		qp->msn = 0;
1024  	}
1025  
1026  	hwq_attr.type = HWQ_TYPE_QUEUE;
1027  	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
1028  	if (rc)
1029  		return rc;
1030  
1031  	if (!sq->hwq.is_user) {
1032  		rc = bnxt_qplib_alloc_init_swq(sq);
1033  		if (rc)
1034  			goto fail_sq;
1035  
1036  		if (psn_sz)
1037  			bnxt_qplib_init_psn_ptr(qp, psn_sz);
1038  	}
1039  	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
1040  	pbl = &sq->hwq.pbl[PBL_LVL_0];
1041  	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1042  	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
1043  		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
1044  	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
1045  	req.sq_pg_size_sq_lvl = pg_sz_lvl;
1046  	req.sq_fwo_sq_sge =
1047  		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
1048  			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
1049  	req.scq_cid = cpu_to_le32(qp->scq->id);
1050  
1051  	/* RQ */
1052  	if (!qp->srq) {
1053  		rq->dbinfo.flags = 0;
1054  		hwq_attr.res = res;
1055  		hwq_attr.sginfo = &rq->sg_info;
1056  		hwq_attr.stride = sizeof(struct sq_sge);
1057  		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
1058  		hwq_attr.aux_stride = 0;
1059  		hwq_attr.aux_depth = 0;
1060  		hwq_attr.type = HWQ_TYPE_QUEUE;
1061  		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
1062  		if (rc)
1063  			goto sq_swq;
1064  		if (!rq->hwq.is_user) {
1065  			rc = bnxt_qplib_alloc_init_swq(rq);
1066  			if (rc)
1067  				goto fail_rq;
1068  		}
1069  
1070  		req.rq_size = cpu_to_le32(rq->max_wqe);
1071  		pbl = &rq->hwq.pbl[PBL_LVL_0];
1072  		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1073  		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
1074  			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
1075  		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
1076  		req.rq_pg_size_rq_lvl = pg_sz_lvl;
1077  		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1078  			6 : rq->max_sge;
1079  		req.rq_fwo_rq_sge =
1080  			cpu_to_le16(((nsge &
1081  				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
1082  				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
1083  	} else {
1084  		/* SRQ */
1085  		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
1086  		req.srq_cid = cpu_to_le32(qp->srq->id);
1087  	}
1088  	req.rcq_cid = cpu_to_le32(qp->rcq->id);
1089  
1090  	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
1091  	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
1092  	if (qp->sig_type)
1093  		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
1094  	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
1095  		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
1096  	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
1097  		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
1098  
1099  	req.qp_flags = cpu_to_le32(qp_flags);
1100  
1101  	/* ORRQ and IRRQ */
1102  	if (psn_sz) {
1103  		xrrq = &qp->orrq;
1104  		xrrq->max_elements =
1105  			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1106  		req_size = xrrq->max_elements *
1107  			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1108  		req_size &= ~(PAGE_SIZE - 1);
1109  		sginfo.pgsize = req_size;
1110  		sginfo.pgshft = PAGE_SHIFT;
1111  
1112  		hwq_attr.res = res;
1113  		hwq_attr.sginfo = &sginfo;
1114  		hwq_attr.depth = xrrq->max_elements;
1115  		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
1116  		hwq_attr.aux_stride = 0;
1117  		hwq_attr.aux_depth = 0;
1118  		hwq_attr.type = HWQ_TYPE_CTX;
1119  		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
1120  		if (rc)
1121  			goto rq_swq;
1122  		pbl = &xrrq->pbl[PBL_LVL_0];
1123  		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1124  
1125  		xrrq = &qp->irrq;
1126  		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
1127  						qp->max_dest_rd_atomic);
1128  		req_size = xrrq->max_elements *
1129  			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1130  		req_size &= ~(PAGE_SIZE - 1);
1131  		sginfo.pgsize = req_size;
1132  		hwq_attr.depth =  xrrq->max_elements;
1133  		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
1134  		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
1135  		if (rc)
1136  			goto fail_orrq;
1137  
1138  		pbl = &xrrq->pbl[PBL_LVL_0];
1139  		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1140  	}
1141  	req.pd_id = cpu_to_le32(qp->pd->id);
1142  
1143  	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
1144  				sizeof(resp), 0);
1145  	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1146  	if (rc)
1147  		goto fail;
1148  
1149  	qp->id = le32_to_cpu(resp.xid);
1150  	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
1151  	INIT_LIST_HEAD(&qp->sq_flush);
1152  	INIT_LIST_HEAD(&qp->rq_flush);
1153  	qp->cctx = res->cctx;
1154  	sq->dbinfo.hwq = &sq->hwq;
1155  	sq->dbinfo.xid = qp->id;
1156  	sq->dbinfo.db = qp->dpi->dbr;
1157  	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
1158  	if (rq->max_wqe) {
1159  		rq->dbinfo.hwq = &rq->hwq;
1160  		rq->dbinfo.xid = qp->id;
1161  		rq->dbinfo.db = qp->dpi->dbr;
1162  		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
1163  	}
1164  	spin_lock_bh(&rcfw->tbl_lock);
1165  	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1166  	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1167  	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
1168  	spin_unlock_bh(&rcfw->tbl_lock);
1169  
1170  	return 0;
1171  fail:
1172  	bnxt_qplib_free_hwq(res, &qp->irrq);
1173  fail_orrq:
1174  	bnxt_qplib_free_hwq(res, &qp->orrq);
1175  rq_swq:
1176  	kfree(rq->swq);
1177  fail_rq:
1178  	bnxt_qplib_free_hwq(res, &rq->hwq);
1179  sq_swq:
1180  	kfree(sq->swq);
1181  fail_sq:
1182  	bnxt_qplib_free_hwq(res, &sq->hwq);
1183  	return rc;
1184  }
1185  
1186  static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
1187  {
1188  	switch (qp->state) {
1189  	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1190  		/* INIT->RTR, configure the path_mtu to the default
1191  		 * 2048 if not being requested
1192  		 */
1193  		if (!(qp->modify_flags &
1194  		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
1195  			qp->modify_flags |=
1196  				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1197  			qp->path_mtu =
1198  				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1199  		}
1200  		qp->modify_flags &=
1201  			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1202  		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
1203  		if (qp->max_dest_rd_atomic < 1)
1204  			qp->max_dest_rd_atomic = 1;
1205  		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
1206  		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
1207  		if (!(qp->modify_flags &
1208  		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
1209  			qp->modify_flags |=
1210  				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
1211  			qp->ah.sgid_index = 0;
1212  		}
1213  		break;
1214  	default:
1215  		break;
1216  	}
1217  }
1218  
1219  static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
1220  {
1221  	switch (qp->state) {
1222  	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1223  		/* Bono FW requires the max_rd_atomic to be >= 1 */
1224  		if (qp->max_rd_atomic < 1)
1225  			qp->max_rd_atomic = 1;
1226  		/* Bono FW does not allow PKEY_INDEX,
1227  		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
1228  		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
1229  		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
1230  		 * modification
1231  		 */
1232  		qp->modify_flags &=
1233  			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
1234  			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1235  			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1236  			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1237  			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1238  			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1239  			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1240  			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
1241  			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
1242  			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
1243  			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
1244  			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
1245  		break;
1246  	default:
1247  		break;
1248  	}
1249  }
1250  
1251  static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
1252  {
1253  	switch (qp->cur_qp_state) {
1254  	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1255  		break;
1256  	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1257  		__modify_flags_from_init_state(qp);
1258  		break;
1259  	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1260  		__modify_flags_from_rtr_state(qp);
1261  		break;
1262  	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1263  		break;
1264  	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1265  		break;
1266  	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1267  		break;
1268  	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1269  		break;
1270  	default:
1271  		break;
1272  	}
1273  }
1274  
1275  int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1276  {
1277  	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1278  	struct creq_modify_qp_resp resp = {};
1279  	struct bnxt_qplib_cmdqmsg msg = {};
1280  	struct cmdq_modify_qp req = {};
1281  	u32 temp32[4];
1282  	u32 bmask;
1283  	int rc;
1284  
1285  	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1286  				 CMDQ_BASE_OPCODE_MODIFY_QP,
1287  				 sizeof(req));
1288  
1289  	/* Filter out the qp_attr_mask based on the state->new transition */
1290  	__filter_modify_flags(qp);
1291  	bmask = qp->modify_flags;
1292  	req.modify_mask = cpu_to_le32(qp->modify_flags);
1293  	req.qp_cid = cpu_to_le32(qp->id);
1294  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
1295  		req.network_type_en_sqd_async_notify_new_state =
1296  				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
1297  				(qp->en_sqd_async_notify ?
1298  					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
1299  	}
1300  	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
1301  
1302  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
1303  		req.access = qp->access;
1304  
1305  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
1306  		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);
1307  
1308  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
1309  		req.qkey = cpu_to_le32(qp->qkey);
1310  
1311  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
1312  		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
1313  		req.dgid[0] = cpu_to_le32(temp32[0]);
1314  		req.dgid[1] = cpu_to_le32(temp32[1]);
1315  		req.dgid[2] = cpu_to_le32(temp32[2]);
1316  		req.dgid[3] = cpu_to_le32(temp32[3]);
1317  	}
1318  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
1319  		req.flow_label = cpu_to_le32(qp->ah.flow_label);
1320  
1321  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
1322  		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
1323  					     [qp->ah.sgid_index]);
1324  
1325  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
1326  		req.hop_limit = qp->ah.hop_limit;
1327  
1328  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
1329  		req.traffic_class = qp->ah.traffic_class;
1330  
1331  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
1332  		memcpy(req.dest_mac, qp->ah.dmac, 6);
1333  
1334  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
1335  		req.path_mtu_pingpong_push_enable |= qp->path_mtu;
1336  
1337  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
1338  		req.timeout = qp->timeout;
1339  
1340  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
1341  		req.retry_cnt = qp->retry_cnt;
1342  
1343  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
1344  		req.rnr_retry = qp->rnr_retry;
1345  
1346  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
1347  		req.min_rnr_timer = qp->min_rnr_timer;
1348  
1349  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
1350  		req.rq_psn = cpu_to_le32(qp->rq.psn);
1351  
1352  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
1353  		req.sq_psn = cpu_to_le32(qp->sq.psn);
1354  
1355  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
1356  		req.max_rd_atomic =
1357  			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1358  
1359  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
1360  		req.max_dest_rd_atomic =
1361  			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1362  
1363  	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1364  	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1365  	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1366  	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1367  	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1368  	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1369  		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1370  
1371  	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1372  
1373  	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),  sizeof(resp), 0);
1374  	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1375  	if (rc)
1376  		return rc;
1377  	qp->cur_qp_state = qp->state;
1378  	return 0;
1379  }
1380  
1381  int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1382  {
1383  	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1384  	struct creq_query_qp_resp resp = {};
1385  	struct bnxt_qplib_cmdqmsg msg = {};
1386  	struct bnxt_qplib_rcfw_sbuf sbuf;
1387  	struct creq_query_qp_resp_sb *sb;
1388  	struct cmdq_query_qp req = {};
1389  	u32 temp32[4];
1390  	int i, rc;
1391  
1392  	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
1393  	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
1394  				     &sbuf.dma_addr, GFP_KERNEL);
1395  	if (!sbuf.sb)
1396  		return -ENOMEM;
1397  	sb = sbuf.sb;
1398  
1399  	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1400  				 CMDQ_BASE_OPCODE_QUERY_QP,
1401  				 sizeof(req));
1402  
1403  	req.qp_cid = cpu_to_le32(qp->id);
1404  	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
1405  	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
1406  				sizeof(resp), 0);
1407  	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1408  	if (rc)
1409  		goto bail;
1410  	/* Extract the context from the side buffer */
1411  	qp->state = sb->en_sqd_async_notify_state &
1412  			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1413  	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1414  				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
1415  	qp->access = sb->access;
1416  	qp->pkey_index = le16_to_cpu(sb->pkey);
1417  	qp->qkey = le32_to_cpu(sb->qkey);
1418  
1419  	temp32[0] = le32_to_cpu(sb->dgid[0]);
1420  	temp32[1] = le32_to_cpu(sb->dgid[1]);
1421  	temp32[2] = le32_to_cpu(sb->dgid[2]);
1422  	temp32[3] = le32_to_cpu(sb->dgid[3]);
1423  	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1424  
1425  	qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1426  
1427  	qp->ah.sgid_index = 0;
1428  	for (i = 0; i < res->sgid_tbl.max; i++) {
1429  		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1430  			qp->ah.sgid_index = i;
1431  			break;
1432  		}
1433  	}
1434  	if (i == res->sgid_tbl.max)
1435  		dev_warn(&res->pdev->dev, "SGID not found??\n");
1436  
1437  	qp->ah.hop_limit = sb->hop_limit;
1438  	qp->ah.traffic_class = sb->traffic_class;
1439  	memcpy(qp->ah.dmac, sb->dest_mac, 6);
1440  	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1441  				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
1442  				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1443  	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1444  				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
1445  				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
1446  	qp->timeout = sb->timeout;
1447  	qp->retry_cnt = sb->retry_cnt;
1448  	qp->rnr_retry = sb->rnr_retry;
1449  	qp->min_rnr_timer = sb->min_rnr_timer;
1450  	qp->rq.psn = le32_to_cpu(sb->rq_psn);
1451  	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1452  	qp->sq.psn = le32_to_cpu(sb->sq_psn);
1453  	qp->max_dest_rd_atomic =
1454  			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1455  	qp->sq.max_wqe = qp->sq.hwq.max_elements;
1456  	qp->rq.max_wqe = qp->rq.hwq.max_elements;
1457  	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1458  	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1459  	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1460  	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1461  	memcpy(qp->smac, sb->src_mac, 6);
1462  	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1463  	qp->port_id = le16_to_cpu(sb->port_id);
1464  bail:
1465  	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
1466  			  sbuf.sb, sbuf.dma_addr);
1467  	return rc;
1468  }
1469  
1470  static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1471  {
1472  	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1473  	u32 peek_flags, peek_cons;
1474  	struct cq_base *hw_cqe;
1475  	int i;
1476  
1477  	peek_flags = cq->dbinfo.flags;
1478  	peek_cons = cq_hwq->cons;
1479  	for (i = 0; i < cq_hwq->max_elements; i++) {
1480  		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
1481  		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
1482  			continue;
1483  		/*
1484  		 * The valid bit of the entry must be checked before
1485  		 * reading any further.
1486  		 */
1487  		dma_rmb();
1488  		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1489  		case CQ_BASE_CQE_TYPE_REQ:
1490  		case CQ_BASE_CQE_TYPE_TERMINAL:
1491  		{
1492  			struct cq_req *cqe = (struct cq_req *)hw_cqe;
1493  
1494  			if (qp == le64_to_cpu(cqe->qp_handle))
1495  				cqe->qp_handle = 0;
1496  			break;
1497  		}
1498  		case CQ_BASE_CQE_TYPE_RES_RC:
1499  		case CQ_BASE_CQE_TYPE_RES_UD:
1500  		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1501  		{
1502  			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1503  
1504  			if (qp == le64_to_cpu(cqe->qp_handle))
1505  				cqe->qp_handle = 0;
1506  			break;
1507  		}
1508  		default:
1509  			break;
1510  		}
1511  		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
1512  					 1, &peek_flags);
1513  	}
1514  }
1515  
1516  int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1517  			  struct bnxt_qplib_qp *qp)
1518  {
1519  	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1520  	struct creq_destroy_qp_resp resp = {};
1521  	struct bnxt_qplib_cmdqmsg msg = {};
1522  	struct cmdq_destroy_qp req = {};
1523  	u32 tbl_indx;
1524  	int rc;
1525  
1526  	spin_lock_bh(&rcfw->tbl_lock);
1527  	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1528  	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1529  	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
1530  	spin_unlock_bh(&rcfw->tbl_lock);
1531  
1532  	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1533  				 CMDQ_BASE_OPCODE_DESTROY_QP,
1534  				 sizeof(req));
1535  
1536  	req.qp_cid = cpu_to_le32(qp->id);
1537  	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
1538  				sizeof(resp), 0);
1539  	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1540  	if (rc) {
1541  		spin_lock_bh(&rcfw->tbl_lock);
1542  		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1543  		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
1544  		spin_unlock_bh(&rcfw->tbl_lock);
1545  		return rc;
1546  	}
1547  
1548  	return 0;
1549  }
1550  
1551  void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
1552  			    struct bnxt_qplib_qp *qp)
1553  {
1554  	bnxt_qplib_free_qp_hdr_buf(res, qp);
1555  	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
1556  	kfree(qp->sq.swq);
1557  
1558  	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
1559  	kfree(qp->rq.swq);
1560  
1561  	if (qp->irrq.max_elements)
1562  		bnxt_qplib_free_hwq(res, &qp->irrq);
1563  	if (qp->orrq.max_elements)
1564  		bnxt_qplib_free_hwq(res, &qp->orrq);
1565  
1566  }
1567  
1568  void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1569  				struct bnxt_qplib_sge *sge)
1570  {
1571  	struct bnxt_qplib_q *sq = &qp->sq;
1572  	u32 sw_prod;
1573  
1574  	memset(sge, 0, sizeof(*sge));
1575  
1576  	if (qp->sq_hdr_buf) {
1577  		sw_prod = sq->swq_start;
1578  		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1579  					 sw_prod * qp->sq_hdr_buf_size);
1580  		sge->lkey = 0xFFFFFFFF;
1581  		sge->size = qp->sq_hdr_buf_size;
1582  		return qp->sq_hdr_buf + sw_prod * sge->size;
1583  	}
1584  	return NULL;
1585  }
1586  
1587  u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1588  {
1589  	struct bnxt_qplib_q *rq = &qp->rq;
1590  
1591  	return rq->swq_start;
1592  }
1593  
1594  dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1595  {
1596  	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1597  }
1598  
1599  void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1600  				struct bnxt_qplib_sge *sge)
1601  {
1602  	struct bnxt_qplib_q *rq = &qp->rq;
1603  	u32 sw_prod;
1604  
1605  	memset(sge, 0, sizeof(*sge));
1606  
1607  	if (qp->rq_hdr_buf) {
1608  		sw_prod = rq->swq_start;
1609  		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1610  					 sw_prod * qp->rq_hdr_buf_size);
1611  		sge->lkey = 0xFFFFFFFF;
1612  		sge->size = qp->rq_hdr_buf_size;
1613  		return qp->rq_hdr_buf + sw_prod * sge->size;
1614  	}
1615  	return NULL;
1616  }
1617  
1618  /* Fill the MSN table into the next psn row */
1619  static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
1620  				       struct bnxt_qplib_swqe *wqe,
1621  				       struct bnxt_qplib_swq *swq)
1622  {
1623  	struct sq_msn_search *msns;
1624  	u32 start_psn, next_psn;
1625  	u16 start_idx;
1626  
1627  	msns = (struct sq_msn_search *)swq->psn_search;
1628  	msns->start_idx_next_psn_start_psn = 0;
1629  
1630  	start_psn = swq->start_psn;
1631  	next_psn = swq->next_psn;
1632  	start_idx = swq->slot_idx;
1633  	msns->start_idx_next_psn_start_psn |=
1634  		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
1635  	qp->msn++;
1636  	qp->msn %= qp->msn_tbl_sz;
1637  }
1638  
1639  static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
1640  				       struct bnxt_qplib_swqe *wqe,
1641  				       struct bnxt_qplib_swq *swq)
1642  {
1643  	struct sq_psn_search_ext *psns_ext;
1644  	struct sq_psn_search *psns;
1645  	u32 flg_npsn;
1646  	u32 op_spsn;
1647  
1648  	if (!swq->psn_search)
1649  		return;
1650  	/* Handle the MSN table differently based on capability flags */
1651  	if (qp->is_host_msn_tbl) {
1652  		bnxt_qplib_fill_msn_search(qp, wqe, swq);
1653  		return;
1654  	}
1655  	psns = swq->psn_search;
1657  	psns_ext = swq->psn_ext;
1658  
1659  	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1660  		    SQ_PSN_SEARCH_START_PSN_MASK);
1661  	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1662  		     SQ_PSN_SEARCH_OPCODE_MASK);
1663  	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1664  		     SQ_PSN_SEARCH_NEXT_PSN_MASK);
1665  
1666  	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
1667  		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
1668  		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
1669  		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
1670  	} else {
1671  		psns->opcode_start_psn = cpu_to_le32(op_spsn);
1672  		psns->flags_next_psn = cpu_to_le32(flg_npsn);
1673  	}
1674  }
1675  
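/*
 * Inline sends copy the payload straight into the SQ ring instead of
 * referencing SGEs.  The loop below packs the source SG list into
 * consecutive 16-byte queue slots (sizeof(struct sq_sge)), pulling a fresh
 * slot from bnxt_qplib_get_prod_qe() whenever the current one fills up.
 * The total length may not exceed qp->max_inline_data; on success the
 * number of bytes copied is returned so the caller can set the WQE length.
 */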
1676  static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
1677  				 struct bnxt_qplib_swqe *wqe,
1678  				 u16 *idx)
1679  {
1680  	struct bnxt_qplib_hwq *hwq;
1681  	int len, t_len, offt;
1682  	bool pull_dst = true;
1683  	void *il_dst = NULL;
1684  	void *il_src = NULL;
1685  	int t_cplen, cplen;
1686  	int indx;
1687  
1688  	hwq = &qp->sq.hwq;
1689  	t_len = 0;
1690  	for (indx = 0; indx < wqe->num_sge; indx++) {
1691  		len = wqe->sg_list[indx].size;
1692  		il_src = (void *)wqe->sg_list[indx].addr;
1693  		t_len += len;
1694  		if (t_len > qp->max_inline_data)
1695  			return -ENOMEM;
1696  		while (len) {
1697  			if (pull_dst) {
1698  				pull_dst = false;
1699  				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
1700  				(*idx)++;
1701  				t_cplen = 0;
1702  				offt = 0;
1703  			}
1704  			cplen = min_t(int, len, sizeof(struct sq_sge));
1705  			cplen = min_t(int, cplen,
1706  					(sizeof(struct sq_sge) - offt));
1707  			memcpy(il_dst, il_src, cplen);
1708  			t_cplen += cplen;
1709  			il_src += cplen;
1710  			il_dst += cplen;
1711  			offt += cplen;
1712  			len -= cplen;
1713  			if (t_cplen == sizeof(struct sq_sge))
1714  				pull_dst = true;
1715  		}
1716  	}
1717  
1718  	return t_len;
1719  }
1720  
1721  static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
1722  			       struct bnxt_qplib_sge *ssge,
1723  			       u16 nsge, u16 *idx)
1724  {
1725  	struct sq_sge *dsge;
1726  	int indx, len = 0;
1727  
1728  	for (indx = 0; indx < nsge; indx++, (*idx)++) {
1729  		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
1730  		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
1731  		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
1732  		dsge->size = cpu_to_le32(ssge[indx].size);
1733  		len += ssge[indx].size;
1734  	}
1735  
1736  	return len;
1737  }
1738  
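/*
 * Estimate how many 16-byte slots this WQE needs: the send header plus
 * either the SG list or the ALIGNed inline payload.  __xlate_qfd()
 * converts the queue-full delta into the same slot units, and *wqe_sz
 * reports the real WQE size.  In static (fixed-size) WQE mode the producer
 * still advances by a full 8 slots so every WQE starts on a fixed stride.
 */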
1739  static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
1740  				     struct bnxt_qplib_swqe *wqe,
1741  				     u16 *wqe_sz, u16 *qdf, u8 mode)
1742  {
1743  	u32 ilsize, bytes;
1744  	u16 nsge;
1745  	u16 slot;
1746  
1747  	nsge = wqe->num_sge;
1748  	/* sq_send_hdr is used for the RQ as well; the header size is the same. */
1749  	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
1750  	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1751  		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
1752  		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
1753  		bytes += sizeof(struct sq_send_hdr);
1754  	}
1755  
1756  	*qdf =  __xlate_qfd(qp->sq.q_full_delta, bytes);
1757  	slot = bytes >> 4;
1758  	*wqe_sz = slot;
1759  	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
1760  		slot = 8;
1761  	return slot;
1762  }
1763  
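/*
 * Point swq->psn_search/psn_ext at the PSN (or MSN) search entry for this
 * WQE.  The entries live in the pad pages attached to the SQ hardware
 * queue; the entry index comes from the WQE's starting slot, or from
 * qp->msn when the host MSN table is in use, and is then split into a pad
 * page number and offset using the per-entry stride.
 */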
1764  static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
1765  				     struct bnxt_qplib_swq *swq, bool hw_retx)
1766  {
1767  	struct bnxt_qplib_hwq *hwq;
1768  	u32 pg_num, pg_indx;
1769  	void *buff;
1770  	u32 tail;
1771  
1772  	hwq = &sq->hwq;
1773  	if (!hwq->pad_pg)
1774  		return;
1775  	tail = swq->slot_idx / sq->dbinfo.max_slot;
1776  	if (hw_retx) {
1777  		/* For HW retx use qp msn index */
1778  		tail = qp->msn;
1779  		tail %= qp->msn_tbl_sz;
1780  	}
1781  	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
1782  	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
1783  	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
1784  	swq->psn_ext = buff;
1785  	swq->psn_search = buff;
1786  }
1787  
1788  void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1789  {
1790  	struct bnxt_qplib_q *sq = &qp->sq;
1791  
1792  	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1793  }
1794  
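/*
 * Build one send WQE in the SQ ring.  The doorbell is rung separately once
 * the caller has posted its batch, roughly (illustrative only):
 *
 *	rc = bnxt_qplib_post_send(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_send_db(qp);
 *
 * The function reserves the required slots, fills the base and extended
 * headers, copies either the SG list or the inline payload, advances the
 * SQ PSN by the number of MTU-sized packets the transfer needs and, for
 * wire WQEs, records the PSN/MSN search entry used for retransmission.
 */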
1795  int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1796  			 struct bnxt_qplib_swqe *wqe)
1797  {
1798  	struct bnxt_qplib_nq_work *nq_work = NULL;
1799  	int i, rc = 0, data_len = 0, pkt_num = 0;
1800  	struct bnxt_qplib_q *sq = &qp->sq;
1801  	struct bnxt_qplib_hwq *hwq;
1802  	struct bnxt_qplib_swq *swq;
1803  	bool sch_handler = false;
1804  	u16 wqe_sz, qdf = 0;
1805  	bool msn_update;
1806  	void *base_hdr;
1807  	void *ext_hdr;
1808  	__le32 temp32;
1809  	u32 wqe_idx;
1810  	u32 slots;
1811  	u16 idx;
1812  
1813  	hwq = &sq->hwq;
1814  	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1815  	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1816  		dev_err(&hwq->pdev->dev,
1817  			"FP: QP (0x%x) is in the 0x%x state\n",
1818  			qp->id, qp->state);
1819  		rc = -EINVAL;
1820  		goto done;
1821  	}
1822  
1823  	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1824  	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1825  		dev_err(&hwq->pdev->dev,
1826  			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1827  			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1828  		rc = -ENOMEM;
1829  		goto done;
1830  	}
1831  
1832  	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1833  	bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
1834  
1835  	idx = 0;
1836  	swq->slot_idx = hwq->prod;
1837  	swq->slots = slots;
1838  	swq->wr_id = wqe->wr_id;
1839  	swq->type = wqe->type;
1840  	swq->flags = wqe->flags;
1841  	swq->start_psn = sq->psn & BTH_PSN_MASK;
1842  	if (qp->sig_type)
1843  		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1844  
1845  	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1846  		sch_handler = true;
1847  		dev_dbg(&hwq->pdev->dev,
1848  			"%s Error QP. Scheduling for poll_cq\n", __func__);
1849  		goto queue_err;
1850  	}
1851  
1852  	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1853  	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1854  	memset(base_hdr, 0, sizeof(struct sq_sge));
1855  	memset(ext_hdr, 0, sizeof(struct sq_sge));
1856  
1857  	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1858  		/* Copy the inline data */
1859  		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1860  	else
1861  		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1862  					       &idx);
1863  	if (data_len < 0)
1864  		goto queue_err;
1865  	/* Make sure we update MSN table only for wired wqes */
1866  	msn_update = true;
1867  	/* Specifics */
1868  	switch (wqe->type) {
1869  	case BNXT_QPLIB_SWQE_TYPE_SEND:
1870  		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1871  			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1872  			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
1873  			/* Assemble info for Raw Ethertype QPs */
1874  
1875  			sqe->wqe_type = wqe->type;
1876  			sqe->flags = wqe->flags;
1877  			sqe->wqe_size = wqe_sz;
1878  			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1879  			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1880  			sqe->length = cpu_to_le32(data_len);
1881  			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1882  				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1883  				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1884  
1885  			break;
1886  		}
1887  		fallthrough;
1888  	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1889  	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1890  	{
1891  		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1892  		struct sq_send_hdr *sqe = base_hdr;
1893  
1894  		sqe->wqe_type = wqe->type;
1895  		sqe->flags = wqe->flags;
1896  		sqe->wqe_size = wqe_sz;
1897  		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1898  		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1899  		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1900  			sqe->q_key = cpu_to_le32(wqe->send.q_key);
1901  			sqe->length = cpu_to_le32(data_len);
1902  			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1903  			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1904  						      SQ_SEND_DST_QP_MASK);
1905  			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1906  						    SQ_SEND_AVID_MASK);
1907  			msn_update = false;
1908  		} else {
1909  			sqe->length = cpu_to_le32(data_len);
1910  			if (qp->mtu)
1911  				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1912  			if (!pkt_num)
1913  				pkt_num = 1;
1914  			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1915  		}
1916  		break;
1917  	}
1918  	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1919  	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1920  	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1921  	{
1922  		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
1923  		struct sq_rdma_hdr *sqe = base_hdr;
1924  
1925  		sqe->wqe_type = wqe->type;
1926  		sqe->flags = wqe->flags;
1927  		sqe->wqe_size = wqe_sz;
1928  		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1929  		sqe->length = cpu_to_le32((u32)data_len);
1930  		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1931  		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1932  		if (qp->mtu)
1933  			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1934  		if (!pkt_num)
1935  			pkt_num = 1;
1936  		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1937  		break;
1938  	}
1939  	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1940  	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1941  	{
1942  		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
1943  		struct sq_atomic_hdr *sqe = base_hdr;
1944  
1945  		sqe->wqe_type = wqe->type;
1946  		sqe->flags = wqe->flags;
1947  		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1948  		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1949  		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1950  		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1951  		if (qp->mtu)
1952  			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1953  		if (!pkt_num)
1954  			pkt_num = 1;
1955  		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1956  		break;
1957  	}
1958  	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1959  	{
1960  		struct sq_localinvalidate *sqe = base_hdr;
1961  
1962  		sqe->wqe_type = wqe->type;
1963  		sqe->flags = wqe->flags;
1964  		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1965  		msn_update = false;
1966  		break;
1967  	}
1968  	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1969  	{
1970  		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
1971  		struct sq_fr_pmr_hdr *sqe = base_hdr;
1972  
1973  		sqe->wqe_type = wqe->type;
1974  		sqe->flags = wqe->flags;
1975  		sqe->access_cntl = wqe->frmr.access_cntl |
1976  				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1977  		sqe->zero_based_page_size_log =
1978  			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1979  			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1980  			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1981  		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1982  		temp32 = cpu_to_le32(wqe->frmr.length);
1983  		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1984  		sqe->numlevels_pbl_page_size_log =
1985  			((wqe->frmr.pbl_pg_sz_log <<
1986  					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1987  					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1988  			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1989  					SQ_FR_PMR_NUMLEVELS_MASK);
1990  
1991  		for (i = 0; i < wqe->frmr.page_list_len; i++)
1992  			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1993  						wqe->frmr.page_list[i] |
1994  						PTU_PTE_VALID);
1995  		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1996  		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
1997  		msn_update = false;
1998  
1999  		break;
2000  	}
2001  	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
2002  	{
2003  		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
2004  		struct sq_bind_hdr *sqe = base_hdr;
2005  
2006  		sqe->wqe_type = wqe->type;
2007  		sqe->flags = wqe->flags;
2008  		sqe->access_cntl = wqe->bind.access_cntl;
2009  		sqe->mw_type_zero_based = wqe->bind.mw_type |
2010  			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
2011  		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
2012  		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
2013  		ext_sqe->va = cpu_to_le64(wqe->bind.va);
2014  		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
2015  		msn_update = false;
2016  		break;
2017  	}
2018  	default:
2019  		/* Bad wqe, return error */
2020  		rc = -EINVAL;
2021  		goto done;
2022  	}
2023  	if (!qp->is_host_msn_tbl || msn_update) {
2024  		swq->next_psn = sq->psn & BTH_PSN_MASK;
2025  		bnxt_qplib_fill_psn_search(qp, wqe, swq);
2026  	}
2027  queue_err:
2028  	bnxt_qplib_swq_mod_start(sq, wqe_idx);
2029  	bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
2030  	qp->wqe_cnt++;
2031  done:
2032  	if (sch_handler) {
2033  		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2034  		if (nq_work) {
2035  			nq_work->cq = qp->scq;
2036  			nq_work->nq = qp->scq->nq;
2037  			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2038  			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
2039  		} else {
2040  			dev_err(&hwq->pdev->dev,
2041  				"FP: Failed to allocate SQ nq_work!\n");
2042  			rc = -ENOMEM;
2043  		}
2044  	}
2045  	return rc;
2046  }
2047  
2048  void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
2049  {
2050  	struct bnxt_qplib_q *rq = &qp->rq;
2051  
2052  	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
2053  }
2054  
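/*
 * Build one receive WQE in the RQ ring.  As on the send side, the doorbell
 * is rung separately (illustrative only):
 *
 *	rc = bnxt_qplib_post_recv(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_recv_db(qp);
 *
 * A request with no SGEs still consumes one zeroed SGE slot, presumably
 * because the hardware expects at least one, and the WQE size in the
 * header is bumped accordingly.
 */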
2055  int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
2056  			 struct bnxt_qplib_swqe *wqe)
2057  {
2058  	struct bnxt_qplib_nq_work *nq_work = NULL;
2059  	struct bnxt_qplib_q *rq = &qp->rq;
2060  	struct rq_wqe_hdr *base_hdr;
2061  	struct rq_ext_hdr *ext_hdr;
2062  	struct bnxt_qplib_hwq *hwq;
2063  	struct bnxt_qplib_swq *swq;
2064  	bool sch_handler = false;
2065  	u16 wqe_sz, idx;
2066  	u32 wqe_idx;
2067  	int rc = 0;
2068  
2069  	hwq = &rq->hwq;
2070  	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2071  		dev_err(&hwq->pdev->dev,
2072  			"FP: QP (0x%x) is in the 0x%x state\n",
2073  			qp->id, qp->state);
2074  		rc = -EINVAL;
2075  		goto done;
2076  	}
2077  
2078  	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
2079  		dev_err(&hwq->pdev->dev,
2080  			"FP: QP (0x%x) RQ is full!\n", qp->id);
2081  		rc = -EINVAL;
2082  		goto done;
2083  	}
2084  
2085  	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
2086  	swq->wr_id = wqe->wr_id;
2087  	swq->slots = rq->dbinfo.max_slot;
2088  
2089  	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2090  		sch_handler = true;
2091  		dev_dbg(&hwq->pdev->dev,
2092  			"%s: Error QP. Scheduling for poll_cq\n", __func__);
2093  		goto queue_err;
2094  	}
2095  
2096  	idx = 0;
2097  	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2098  	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2099  	memset(base_hdr, 0, sizeof(struct sq_sge));
2100  	memset(ext_hdr, 0, sizeof(struct sq_sge));
2101  	wqe_sz = (sizeof(struct rq_wqe_hdr) +
2102  		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2103  	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2104  	if (!wqe->num_sge) {
2105  		struct sq_sge *sge;
2106  
2107  		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2108  		sge->size = 0;
2109  		wqe_sz++;
2110  	}
2111  	base_hdr->wqe_type = wqe->type;
2112  	base_hdr->flags = wqe->flags;
2113  	base_hdr->wqe_size = wqe_sz;
2114  	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2115  queue_err:
2116  	bnxt_qplib_swq_mod_start(rq, wqe_idx);
2117  	bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
2118  done:
2119  	if (sch_handler) {
2120  		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2121  		if (nq_work) {
2122  			nq_work->cq = qp->rcq;
2123  			nq_work->nq = qp->rcq->nq;
2124  			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2125  			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2126  		} else {
2127  			dev_err(&hwq->pdev->dev,
2128  				"FP: Failed to allocate RQ nq_work!\n");
2129  			rc = -ENOMEM;
2130  		}
2131  	}
2132  
2133  	return rc;
2134  }
2135  
2136  /* CQ */
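/*
 * CQ creation allocates the completion ring in host memory, issues the
 * CREATE_CQ firmware command with the ring's page information and CNQ
 * binding, initializes the doorbell context and finally arms the CQ with
 * ARMENA so that notifications start flowing on the bound NQ.
 */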
2137  int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2138  {
2139  	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2140  	struct bnxt_qplib_hwq_attr hwq_attr = {};
2141  	struct creq_create_cq_resp resp = {};
2142  	struct bnxt_qplib_cmdqmsg msg = {};
2143  	struct cmdq_create_cq req = {};
2144  	struct bnxt_qplib_pbl *pbl;
2145  	u32 pg_sz_lvl;
2146  	int rc;
2147  
2148  	if (!cq->dpi) {
2149  		dev_err(&rcfw->pdev->dev,
2150  			"FP: CREATE_CQ failed due to NULL DPI\n");
2151  		return -EINVAL;
2152  	}
2153  
2154  	cq->dbinfo.flags = 0;
2155  	hwq_attr.res = res;
2156  	hwq_attr.depth = cq->max_wqe;
2157  	hwq_attr.stride = sizeof(struct cq_base);
2158  	hwq_attr.type = HWQ_TYPE_QUEUE;
2159  	hwq_attr.sginfo = &cq->sg_info;
2160  	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2161  	if (rc)
2162  		return rc;
2163  
2164  	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2165  				 CMDQ_BASE_OPCODE_CREATE_CQ,
2166  				 sizeof(req));
2167  
2168  	req.dpi = cpu_to_le32(cq->dpi->dpi);
2169  	req.cq_handle = cpu_to_le64(cq->cq_handle);
2170  	req.cq_size = cpu_to_le32(cq->max_wqe);
2171  	pbl = &cq->hwq.pbl[PBL_LVL_0];
2172  	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2173  		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
2174  	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2175  	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2176  	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2177  	req.cq_fco_cnq_id = cpu_to_le32(
2178  			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2179  			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2180  	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2181  				sizeof(resp), 0);
2182  	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2183  	if (rc)
2184  		goto fail;
2185  
2186  	cq->id = le32_to_cpu(resp.xid);
2187  	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2188  	init_waitqueue_head(&cq->waitq);
2189  	INIT_LIST_HEAD(&cq->sqf_head);
2190  	INIT_LIST_HEAD(&cq->rqf_head);
2191  	spin_lock_init(&cq->compl_lock);
2192  	spin_lock_init(&cq->flush_lock);
2193  
2194  	cq->dbinfo.hwq = &cq->hwq;
2195  	cq->dbinfo.xid = cq->id;
2196  	cq->dbinfo.db = cq->dpi->dbr;
2197  	cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2198  
2199  	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2200  
2201  	return 0;
2202  
2203  fail:
2204  	bnxt_qplib_free_hwq(res, &cq->hwq);
2205  	return rc;
2206  }
2207  
2208  void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2209  				   struct bnxt_qplib_cq *cq)
2210  {
2211  	bnxt_qplib_free_hwq(res, &cq->hwq);
2212  	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
2213  	/* Reset only the cons bit in the flags */
2214  	cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
2215  }
2216  
2217  int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2218  			 int new_cqes)
2219  {
2220  	struct bnxt_qplib_hwq_attr hwq_attr = {};
2221  	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2222  	struct creq_resize_cq_resp resp = {};
2223  	struct bnxt_qplib_cmdqmsg msg = {};
2224  	struct cmdq_resize_cq req = {};
2225  	struct bnxt_qplib_pbl *pbl;
2226  	u32 pg_sz, lvl, new_sz;
2227  	int rc;
2228  
2229  	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2230  				 CMDQ_BASE_OPCODE_RESIZE_CQ,
2231  				 sizeof(req));
2232  	hwq_attr.sginfo = &cq->sg_info;
2233  	hwq_attr.res = res;
2234  	hwq_attr.depth = new_cqes;
2235  	hwq_attr.stride = sizeof(struct cq_base);
2236  	hwq_attr.type = HWQ_TYPE_QUEUE;
2237  	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2238  	if (rc)
2239  		return rc;
2240  
2241  	req.cq_cid = cpu_to_le32(cq->id);
2242  	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2243  	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
2244  	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2245  				       CMDQ_RESIZE_CQ_LVL_MASK;
2246  	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2247  		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2248  	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
2249  	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2250  
2251  	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2252  				sizeof(resp), 0);
2253  	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2254  	return rc;
2255  }
2256  
2257  int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2258  {
2259  	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2260  	struct creq_destroy_cq_resp resp = {};
2261  	struct bnxt_qplib_cmdqmsg msg = {};
2262  	struct cmdq_destroy_cq req = {};
2263  	u16 total_cnq_events;
2264  	int rc;
2265  
2266  	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2267  				 CMDQ_BASE_OPCODE_DESTROY_CQ,
2268  				 sizeof(req));
2269  
2270  	req.cq_cid = cpu_to_le32(cq->id);
2271  	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2272  				sizeof(resp), 0);
2273  	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2274  	if (rc)
2275  		return rc;
2276  	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2277  	__wait_for_all_nqes(cq, total_cnq_events);
2278  	bnxt_qplib_free_hwq(res, &cq->hwq);
2279  	return 0;
2280  }
2281  
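/*
 * Fabricate FLUSHED_ERR completions for every SWQE still outstanding on
 * the SQ.  Fence WQEs only consume queue space, so they are skipped and
 * any pending phantom processing is cancelled instead.
 */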
2282  static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2283  		      struct bnxt_qplib_cqe **pcqe, int *budget)
2284  {
2285  	struct bnxt_qplib_cqe *cqe;
2286  	u32 start, last;
2287  	int rc = 0;
2288  
2289  	/* Now complete all outstanding SQEs with FLUSHED_ERR */
2290  	start = sq->swq_start;
2291  	cqe = *pcqe;
2292  	while (*budget) {
2293  		last = sq->swq_last;
2294  		if (start == last)
2295  			break;
2296  		/* Skip the FENCE WQE completions */
2297  		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2298  			bnxt_qplib_cancel_phantom_processing(qp);
2299  			goto skip_compl;
2300  		}
2301  		memset(cqe, 0, sizeof(*cqe));
2302  		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2303  		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2304  		cqe->qp_handle = (u64)(unsigned long)qp;
2305  		cqe->wr_id = sq->swq[last].wr_id;
2306  		cqe->src_qp = qp->id;
2307  		cqe->type = sq->swq[last].type;
2308  		cqe++;
2309  		(*budget)--;
2310  skip_compl:
2311  		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2312  					 sq->swq[last].slots, &sq->dbinfo.flags);
2313  		sq->swq_last = sq->swq[last].next_idx;
2314  	}
2315  	*pcqe = cqe;
2316  	if (!(*budget) && sq->swq_last != start)
2317  		/* Out of budget */
2318  		rc = -EAGAIN;
2319  
2320  	return rc;
2321  }
2322  
2323  static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2324  		      struct bnxt_qplib_cqe **pcqe, int *budget)
2325  {
2326  	struct bnxt_qplib_cqe *cqe;
2327  	u32 start, last;
2328  	int opcode = 0;
2329  	int rc = 0;
2330  
2331  	switch (qp->type) {
2332  	case CMDQ_CREATE_QP1_TYPE_GSI:
2333  		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2334  		break;
2335  	case CMDQ_CREATE_QP_TYPE_RC:
2336  		opcode = CQ_BASE_CQE_TYPE_RES_RC;
2337  		break;
2338  	case CMDQ_CREATE_QP_TYPE_UD:
2339  	case CMDQ_CREATE_QP_TYPE_GSI:
2340  		opcode = CQ_BASE_CQE_TYPE_RES_UD;
2341  		break;
2342  	}
2343  
2344  	/* Flush the rest of the RQ */
2345  	start = rq->swq_start;
2346  	cqe = *pcqe;
2347  	while (*budget) {
2348  		last = rq->swq_last;
2349  		if (last == start)
2350  			break;
2351  		memset(cqe, 0, sizeof(*cqe));
2352  		cqe->status =
2353  		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2354  		cqe->opcode = opcode;
2355  		cqe->qp_handle = (unsigned long)qp;
2356  		cqe->wr_id = rq->swq[last].wr_id;
2357  		cqe++;
2358  		(*budget)--;
2359  		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2360  					 rq->swq[last].slots, &rq->dbinfo.flags);
2361  		rq->swq_last = rq->swq[last].next_idx;
2362  	}
2363  	*pcqe = cqe;
2364  	if (!*budget && rq->swq_last != start)
2365  		/* Out of budget */
2366  		rc = -EAGAIN;
2367  
2368  	return rc;
2369  }
2370  
2371  void bnxt_qplib_mark_qp_error(void *qp_handle)
2372  {
2373  	struct bnxt_qplib_qp *qp = qp_handle;
2374  
2375  	if (!qp)
2376  		return;
2377  
2378  	/* Must block new posting of SQ and RQ */
2379  	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2380  	bnxt_qplib_cancel_phantom_processing(qp);
2381  }
2382  
2383  /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2384   *       CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
2385   */
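/*
 * do_wa9060() works around a hardware issue with "phantom" (fence) WQE
 * completions arriving out of order.  If the current SWQE's psn_search
 * entry is still marked (bit 31 of flags_next_psn), completion is deferred:
 * the CQ is armed with ARMALL and -EAGAIN is returned.  Once sq->condition
 * is set, later polls peek ahead in the CQ until the fence WQE's own REQ
 * completion shows up; sq->single is then set so the remaining WQEs are
 * completed one at a time.
 */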
2386  static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2387  		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2388  {
2389  	u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
2390  	struct bnxt_qplib_q *sq = &qp->sq;
2391  	struct cq_req *peek_req_hwcqe;
2392  	struct bnxt_qplib_qp *peek_qp;
2393  	struct bnxt_qplib_q *peek_sq;
2394  	struct bnxt_qplib_swq *swq;
2395  	struct cq_base *peek_hwcqe;
2396  	int i, rc = 0;
2397  
2398  	/* Normal mode */
2399  	/* Check for the psn_search marking before completing */
2400  	swq = &sq->swq[swq_last];
2401  	if (swq->psn_search &&
2402  	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2403  		/* Unmark */
2404  		swq->psn_search->flags_next_psn = cpu_to_le32
2405  			(le32_to_cpu(swq->psn_search->flags_next_psn)
2406  				     & ~0x80000000);
2407  		dev_dbg(&cq->hwq.pdev->dev,
2408  			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2409  			cq_cons, qp->id, swq_last, cqe_sq_cons);
2410  		sq->condition = true;
2411  		sq->send_phantom = true;
2412  
2413  		/* TODO: Only ARM if the previous SQE is ARMALL */
2414  		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2415  		rc = -EAGAIN;
2416  		goto out;
2417  	}
2418  	if (sq->condition) {
2419  		/* Peek at the completions */
2420  		peek_flags = cq->dbinfo.flags;
2421  		peek_sw_cq_cons = cq_cons;
2422  		i = cq->hwq.max_elements;
2423  		while (i--) {
2424  			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2425  						       peek_sw_cq_cons, NULL);
2426  			/* If the next hwcqe is VALID */
2427  			if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
2428  			/*
2429  			 * The valid test of the entry must be done first before
2430  			 * reading any further.
2431  			 */
2432  				dma_rmb();
2433  				/* If the next hwcqe is a REQ */
2434  				if ((peek_hwcqe->cqe_type_toggle &
2435  				    CQ_BASE_CQE_TYPE_MASK) ==
2436  				    CQ_BASE_CQE_TYPE_REQ) {
2437  					peek_req_hwcqe = (struct cq_req *)
2438  							 peek_hwcqe;
2439  					peek_qp = (struct bnxt_qplib_qp *)
2440  						((unsigned long)
2441  						 le64_to_cpu
2442  						 (peek_req_hwcqe->qp_handle));
2443  					peek_sq = &peek_qp->sq;
2444  					peek_sq_cons_idx =
2445  						((le16_to_cpu(
2446  						  peek_req_hwcqe->sq_cons_idx)
2447  						  - 1) % sq->max_wqe);
2448  					/* If the hwcqe's sq's wr_id matches */
2449  					if (peek_sq == sq &&
2450  					    sq->swq[peek_sq_cons_idx].wr_id ==
2451  					    BNXT_QPLIB_FENCE_WRID) {
2452  						/*
2453  						 *  Unbreak only if the phantom
2454  						 *  comes back
2455  						 */
2456  						dev_dbg(&cq->hwq.pdev->dev,
2457  							"FP: Got Phantom CQE\n");
2458  						sq->condition = false;
2459  						sq->single = true;
2460  						rc = 0;
2461  						goto out;
2462  					}
2463  				}
2464  				/* Valid but not the phantom, so keep looping */
2465  			} else {
2466  				/* Not valid yet, just exit and wait */
2467  				rc = -EINVAL;
2468  				goto out;
2469  			}
2470  			bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
2471  						 &peek_sw_cq_cons,
2472  						 1, &peek_flags);
2473  		}
2474  		dev_err(&cq->hwq.pdev->dev,
2475  			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2476  			cq_cons, qp->id, swq_last, cqe_sq_cons);
2477  		rc = -EINVAL;
2478  	}
2479  out:
2480  	return rc;
2481  }
2482  
2483  static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2484  				     struct cq_req *hwcqe,
2485  				     struct bnxt_qplib_cqe **pcqe, int *budget,
2486  				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2487  {
2488  	struct bnxt_qplib_swq *swq;
2489  	struct bnxt_qplib_cqe *cqe;
2490  	struct bnxt_qplib_qp *qp;
2491  	struct bnxt_qplib_q *sq;
2492  	u32 cqe_sq_cons;
2493  	int rc = 0;
2494  
2495  	qp = (struct bnxt_qplib_qp *)((unsigned long)
2496  				      le64_to_cpu(hwcqe->qp_handle));
2497  	if (!qp) {
2498  		dev_err(&cq->hwq.pdev->dev,
2499  			"FP: Process Req qp is NULL\n");
2500  		return -EINVAL;
2501  	}
2502  	sq = &qp->sq;
2503  
2504  	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
2505  	if (qp->sq.flushed) {
2506  		dev_dbg(&cq->hwq.pdev->dev,
2507  			"%s: QP in Flush QP = %p\n", __func__, qp);
2508  		goto done;
2509  	}
2510  	/* Require to walk the sq's swq to fabricate CQEs for all previously
2511  	 * signaled SWQEs due to CQE aggregation from the current sq cons
2512  	 * to the cqe_sq_cons
2513  	 */
2514  	cqe = *pcqe;
2515  	while (*budget) {
2516  		if (sq->swq_last == cqe_sq_cons)
2517  			/* Done */
2518  			break;
2519  
2520  		swq = &sq->swq[sq->swq_last];
2521  		memset(cqe, 0, sizeof(*cqe));
2522  		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2523  		cqe->qp_handle = (u64)(unsigned long)qp;
2524  		cqe->src_qp = qp->id;
2525  		cqe->wr_id = swq->wr_id;
2526  		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2527  			goto skip;
2528  		cqe->type = swq->type;
2529  
2530  		/* For the last CQE, check for status.  For errors, regardless
2531  		 * of the request being signaled or not, it must complete with
2532  		 * the hwcqe error status
2533  		 */
2534  		if (swq->next_idx == cqe_sq_cons &&
2535  		    hwcqe->status != CQ_REQ_STATUS_OK) {
2536  			cqe->status = hwcqe->status;
2537  			dev_err(&cq->hwq.pdev->dev,
2538  				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2539  				sq->swq_last, cqe->wr_id, cqe->status);
2540  			cqe++;
2541  			(*budget)--;
2542  			bnxt_qplib_mark_qp_error(qp);
2543  			/* Add qp to flush list of the CQ */
2544  			bnxt_qplib_add_flush_qp(qp);
2545  		} else {
2546  			/* Before we complete, do WA 9060 */
2547  			if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
2548  				if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2549  					      cqe_sq_cons)) {
2550  					*lib_qp = qp;
2551  					goto out;
2552  				}
2553  			}
2554  			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2555  				cqe->status = CQ_REQ_STATUS_OK;
2556  				cqe++;
2557  				(*budget)--;
2558  			}
2559  		}
2560  skip:
2561  		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2562  					 swq->slots, &sq->dbinfo.flags);
2563  		sq->swq_last = swq->next_idx;
2564  		if (sq->single)
2565  			break;
2566  	}
2567  out:
2568  	*pcqe = cqe;
2569  	if (sq->swq_last != cqe_sq_cons) {
2570  		/* Out of budget */
2571  		rc = -EAGAIN;
2572  		goto done;
2573  	}
2574  	/*
2575  	 * Back to normal completion mode only after it has completed all of
2576  	 * the WC for this CQE
2577  	 */
2578  	sq->single = false;
2579  done:
2580  	return rc;
2581  }
2582  
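/*
 * Return a consumed SRQ element to the free list: the tag becomes the new
 * tail of the reusable-index chain and the SRQ consumer index advances by
 * one full slot.
 */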
2583  static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2584  {
2585  	spin_lock(&srq->hwq.lock);
2586  	srq->swq[srq->last_idx].next_idx = (int)tag;
2587  	srq->last_idx = (int)tag;
2588  	srq->swq[srq->last_idx].next_idx = -1;
2589  	bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
2590  				 srq->dbinfo.max_slot, &srq->dbinfo.flags);
2591  	spin_unlock(&srq->hwq.lock);
2592  }
2593  
2594  static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2595  					struct cq_res_rc *hwcqe,
2596  					struct bnxt_qplib_cqe **pcqe,
2597  					int *budget)
2598  {
2599  	struct bnxt_qplib_srq *srq;
2600  	struct bnxt_qplib_cqe *cqe;
2601  	struct bnxt_qplib_qp *qp;
2602  	struct bnxt_qplib_q *rq;
2603  	u32 wr_id_idx;
2604  
2605  	qp = (struct bnxt_qplib_qp *)((unsigned long)
2606  				      le64_to_cpu(hwcqe->qp_handle));
2607  	if (!qp) {
2608  		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2609  		return -EINVAL;
2610  	}
2611  	if (qp->rq.flushed) {
2612  		dev_dbg(&cq->hwq.pdev->dev,
2613  			"%s: QP in Flush QP = %p\n", __func__, qp);
2614  		return 0;
2615  	}
2616  
2617  	cqe = *pcqe;
2618  	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2619  	cqe->length = le32_to_cpu(hwcqe->length);
2620  	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2621  	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2622  	cqe->flags = le16_to_cpu(hwcqe->flags);
2623  	cqe->status = hwcqe->status;
2624  	cqe->qp_handle = (u64)(unsigned long)qp;
2625  
2626  	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2627  				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2628  	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2629  		srq = qp->srq;
2630  		if (!srq)
2631  			return -EINVAL;
2632  		if (wr_id_idx >= srq->hwq.max_elements) {
2633  			dev_err(&cq->hwq.pdev->dev,
2634  				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2635  				wr_id_idx, srq->hwq.max_elements);
2636  			return -EINVAL;
2637  		}
2638  		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2639  		bnxt_qplib_release_srqe(srq, wr_id_idx);
2640  		cqe++;
2641  		(*budget)--;
2642  		*pcqe = cqe;
2643  	} else {
2644  		struct bnxt_qplib_swq *swq;
2645  
2646  		rq = &qp->rq;
2647  		if (wr_id_idx > (rq->max_wqe - 1)) {
2648  			dev_err(&cq->hwq.pdev->dev,
2649  				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2650  				wr_id_idx, rq->max_wqe);
2651  			return -EINVAL;
2652  		}
2653  		if (wr_id_idx != rq->swq_last)
2654  			return -EINVAL;
2655  		swq = &rq->swq[rq->swq_last];
2656  		cqe->wr_id = swq->wr_id;
2657  		cqe++;
2658  		(*budget)--;
2659  		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2660  					 swq->slots, &rq->dbinfo.flags);
2661  		rq->swq_last = swq->next_idx;
2662  		*pcqe = cqe;
2663  
2664  		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2665  			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2666  			/* Add qp to flush list of the CQ */
2667  			bnxt_qplib_add_flush_qp(qp);
2668  		}
2669  	}
2670  
2671  	return 0;
2672  }
2673  
2674  static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2675  					struct cq_res_ud *hwcqe,
2676  					struct bnxt_qplib_cqe **pcqe,
2677  					int *budget)
2678  {
2679  	struct bnxt_qplib_srq *srq;
2680  	struct bnxt_qplib_cqe *cqe;
2681  	struct bnxt_qplib_qp *qp;
2682  	struct bnxt_qplib_q *rq;
2683  	u32 wr_id_idx;
2684  
2685  	qp = (struct bnxt_qplib_qp *)((unsigned long)
2686  				      le64_to_cpu(hwcqe->qp_handle));
2687  	if (!qp) {
2688  		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2689  		return -EINVAL;
2690  	}
2691  	if (qp->rq.flushed) {
2692  		dev_dbg(&cq->hwq.pdev->dev,
2693  			"%s: QP in Flush QP = %p\n", __func__, qp);
2694  		return 0;
2695  	}
2696  	cqe = *pcqe;
2697  	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2698  	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2699  	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2700  	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2701  	cqe->flags = le16_to_cpu(hwcqe->flags);
2702  	cqe->status = hwcqe->status;
2703  	cqe->qp_handle = (u64)(unsigned long)qp;
2704  	/* FIXME: Endianness fix needed for smac */
2705  	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2706  	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2707  				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2708  	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2709  				  ((le32_to_cpu(
2710  				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
2711  				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2712  
2713  	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2714  		srq = qp->srq;
2715  		if (!srq)
2716  			return -EINVAL;
2717  
2718  		if (wr_id_idx >= srq->hwq.max_elements) {
2719  			dev_err(&cq->hwq.pdev->dev,
2720  				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2721  				wr_id_idx, srq->hwq.max_elements);
2722  			return -EINVAL;
2723  		}
2724  		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2725  		bnxt_qplib_release_srqe(srq, wr_id_idx);
2726  		cqe++;
2727  		(*budget)--;
2728  		*pcqe = cqe;
2729  	} else {
2730  		struct bnxt_qplib_swq *swq;
2731  
2732  		rq = &qp->rq;
2733  		if (wr_id_idx > (rq->max_wqe - 1)) {
2734  			dev_err(&cq->hwq.pdev->dev,
2735  				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2736  				wr_id_idx, rq->max_wqe);
2737  			return -EINVAL;
2738  		}
2739  
2740  		if (rq->swq_last != wr_id_idx)
2741  			return -EINVAL;
2742  		swq = &rq->swq[rq->swq_last];
2743  		cqe->wr_id = swq->wr_id;
2744  		cqe++;
2745  		(*budget)--;
2746  		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2747  					 swq->slots, &rq->dbinfo.flags);
2748  		rq->swq_last = swq->next_idx;
2749  		*pcqe = cqe;
2750  
2751  		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2752  			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2753  			/* Add qp to flush list of the CQ */
2754  			bnxt_qplib_add_flush_qp(qp);
2755  		}
2756  	}
2757  
2758  	return 0;
2759  }
2760  
2761  bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2762  {
2763  	struct cq_base *hw_cqe;
2764  	bool rc = true;
2765  
2766  	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
2767  	/* Check for Valid bit. If the CQE is valid, return false */
2768  	rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
2769  	return rc;
2770  }
2771  
2772  static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2773  						struct cq_res_raweth_qp1 *hwcqe,
2774  						struct bnxt_qplib_cqe **pcqe,
2775  						int *budget)
2776  {
2777  	struct bnxt_qplib_qp *qp;
2778  	struct bnxt_qplib_q *rq;
2779  	struct bnxt_qplib_srq *srq;
2780  	struct bnxt_qplib_cqe *cqe;
2781  	u32 wr_id_idx;
2782  
2783  	qp = (struct bnxt_qplib_qp *)((unsigned long)
2784  				      le64_to_cpu(hwcqe->qp_handle));
2785  	if (!qp) {
2786  		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2787  		return -EINVAL;
2788  	}
2789  	if (qp->rq.flushed) {
2790  		dev_dbg(&cq->hwq.pdev->dev,
2791  			"%s: QP in Flush QP = %p\n", __func__, qp);
2792  		return 0;
2793  	}
2794  	cqe = *pcqe;
2795  	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2796  	cqe->flags = le16_to_cpu(hwcqe->flags);
2797  	cqe->qp_handle = (u64)(unsigned long)qp;
2798  
2799  	wr_id_idx =
2800  		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2801  				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2802  	cqe->src_qp = qp->id;
2803  	if (qp->id == 1 && !cqe->length) {
2804  		/* Add workaround for the length misdetection */
2805  		cqe->length = 296;
2806  	} else {
2807  		cqe->length = le16_to_cpu(hwcqe->length);
2808  	}
2809  	cqe->pkey_index = qp->pkey_index;
2810  	memcpy(cqe->smac, qp->smac, 6);
2811  
2812  	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2813  	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2814  	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2815  
2816  	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2817  		srq = qp->srq;
2818  		if (!srq) {
2819  			dev_err(&cq->hwq.pdev->dev,
2820  				"FP: SRQ used but not defined??\n");
2821  			return -EINVAL;
2822  		}
2823  		if (wr_id_idx >= srq->hwq.max_elements) {
2824  			dev_err(&cq->hwq.pdev->dev,
2825  				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2826  				wr_id_idx, srq->hwq.max_elements);
2827  			return -EINVAL;
2828  		}
2829  		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2830  		bnxt_qplib_release_srqe(srq, wr_id_idx);
2831  		cqe++;
2832  		(*budget)--;
2833  		*pcqe = cqe;
2834  	} else {
2835  		struct bnxt_qplib_swq *swq;
2836  
2837  		rq = &qp->rq;
2838  		if (wr_id_idx > (rq->max_wqe - 1)) {
2839  			dev_err(&cq->hwq.pdev->dev,
2840  				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2841  				wr_id_idx, rq->max_wqe);
2842  			return -EINVAL;
2843  		}
2844  		if (rq->swq_last != wr_id_idx)
2845  			return -EINVAL;
2846  		swq = &rq->swq[rq->swq_last];
2847  		cqe->wr_id = swq->wr_id;
2848  		cqe++;
2849  		(*budget)--;
2850  		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2851  					 swq->slots, &rq->dbinfo.flags);
2852  		rq->swq_last = swq->next_idx;
2853  		*pcqe = cqe;
2854  
2855  		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2856  			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2857  			/* Add qp to flush list of the CQ */
2858  			bnxt_qplib_add_flush_qp(qp);
2859  		}
2860  	}
2861  
2862  	return 0;
2863  }
2864  
2865  static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2866  					  struct cq_terminal *hwcqe,
2867  					  struct bnxt_qplib_cqe **pcqe,
2868  					  int *budget)
2869  {
2870  	struct bnxt_qplib_qp *qp;
2871  	struct bnxt_qplib_q *sq, *rq;
2872  	struct bnxt_qplib_cqe *cqe;
2873  	u32 swq_last = 0, cqe_cons;
2874  	int rc = 0;
2875  
2876  	/* Check the Status */
2877  	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2878  		dev_warn(&cq->hwq.pdev->dev,
2879  			 "FP: CQ Process Terminal Error status = 0x%x\n",
2880  			 hwcqe->status);
2881  
2882  	qp = (struct bnxt_qplib_qp *)((unsigned long)
2883  				      le64_to_cpu(hwcqe->qp_handle));
2884  	if (!qp)
2885  		return -EINVAL;
2886  
2887  	/* Must block new posting of SQ and RQ */
2888  	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2889  
2890  	sq = &qp->sq;
2891  	rq = &qp->rq;
2892  
2893  	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2894  	if (cqe_cons == 0xFFFF)
2895  		goto do_rq;
2896  	cqe_cons %= sq->max_sw_wqe;
2897  
2898  	if (qp->sq.flushed) {
2899  		dev_dbg(&cq->hwq.pdev->dev,
2900  			"%s: QP in Flush QP = %p\n", __func__, qp);
2901  		goto sq_done;
2902  	}
2903  
2904  	/* Terminal CQE can also include aggregated successful CQEs prior.
2905  	 * So we must complete all CQEs from the current sq's cons to the
2906  	 * cq_cons with status OK
2907  	 */
2908  	cqe = *pcqe;
2909  	while (*budget) {
2910  		swq_last = sq->swq_last;
2911  		if (swq_last == cqe_cons)
2912  			break;
2913  		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2914  			memset(cqe, 0, sizeof(*cqe));
2915  			cqe->status = CQ_REQ_STATUS_OK;
2916  			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2917  			cqe->qp_handle = (u64)(unsigned long)qp;
2918  			cqe->src_qp = qp->id;
2919  			cqe->wr_id = sq->swq[swq_last].wr_id;
2920  			cqe->type = sq->swq[swq_last].type;
2921  			cqe++;
2922  			(*budget)--;
2923  		}
2924  		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2925  					 sq->swq[swq_last].slots, &sq->dbinfo.flags);
2926  		sq->swq_last = sq->swq[swq_last].next_idx;
2927  	}
2928  	*pcqe = cqe;
2929  	if (!(*budget) && swq_last != cqe_cons) {
2930  		/* Out of budget */
2931  		rc = -EAGAIN;
2932  		goto sq_done;
2933  	}
2934  sq_done:
2935  	if (rc)
2936  		return rc;
2937  do_rq:
2938  	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2939  	if (cqe_cons == 0xFFFF) {
2940  		goto done;
2941  	} else if (cqe_cons > rq->max_wqe - 1) {
2942  		dev_err(&cq->hwq.pdev->dev,
2943  			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2944  			cqe_cons, rq->max_wqe);
2945  		rc = -EINVAL;
2946  		goto done;
2947  	}
2948  
2949  	if (qp->rq.flushed) {
2950  		dev_dbg(&cq->hwq.pdev->dev,
2951  			"%s: QP in Flush QP = %p\n", __func__, qp);
2952  		rc = 0;
2953  		goto done;
2954  	}
2955  
2956  	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2957  	 * from the current rq->cons to the rq->prod regardless what the
2958  	 * rq->cons the terminal CQE indicates
2959  	 */
2960  
2961  	/* Add qp to flush list of the CQ */
2962  	bnxt_qplib_add_flush_qp(qp);
2963  done:
2964  	return rc;
2965  }
2966  
2967  static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2968  					struct cq_cutoff *hwcqe)
2969  {
2970  	/* Check the Status */
2971  	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2972  		dev_err(&cq->hwq.pdev->dev,
2973  			"FP: CQ Process Cutoff Error status = 0x%x\n",
2974  			hwcqe->status);
2975  		return -EINVAL;
2976  	}
2977  	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2978  	wake_up_interruptible(&cq->waitq);
2979  
2980  	return 0;
2981  }
2982  
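/*
 * Generate software flush completions for every QP linked on this CQ's SQ
 * and RQ flush lists.  The budget shared by __flush_sq() and __flush_rq()
 * is the size of the caller's cqe array; the number of fabricated CQEs is
 * returned.
 */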
2983  int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2984  				  struct bnxt_qplib_cqe *cqe,
2985  				  int num_cqes)
2986  {
2987  	struct bnxt_qplib_qp *qp = NULL;
2988  	u32 budget = num_cqes;
2989  	unsigned long flags;
2990  
2991  	spin_lock_irqsave(&cq->flush_lock, flags);
2992  	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2993  		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2994  		__flush_sq(&qp->sq, qp, &cqe, &budget);
2995  	}
2996  
2997  	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2998  		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2999  		__flush_rq(&qp->rq, qp, &cqe, &budget);
3000  	}
3001  	spin_unlock_irqrestore(&cq->flush_lock, flags);
3002  
3003  	return num_cqes - budget;
3004  }
3005  
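/*
 * Poll up to num_cqes completions from the CQ into the caller-supplied cqe
 * array, dispatching each hardware CQE by type.  The return value is the
 * number of entries filled in, e.g. (illustrative only):
 *
 *	polled = bnxt_qplib_poll_cq(cq, cqe, num_cqes, &lib_qp);
 *
 * lib_qp returns the QP for which phantom-WQE (WA 9060) handling is still
 * pending.  A cut-off CQE ends processing for a CQ that is being resized,
 * and the CQ doorbell is rung only if at least one hardware entry was
 * consumed.
 */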
3006  int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
3007  		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
3008  {
3009  	struct cq_base *hw_cqe;
3010  	int budget, rc = 0;
3011  	u32 hw_polled = 0;
3012  	u8 type;
3013  
3014  	budget = num_cqes;
3015  
3016  	while (budget) {
3017  		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
3018  
3019  		/* Check for Valid bit */
3020  		if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
3021  			break;
3022  
3023  		/*
3024  		 * The valid test of the entry must be done first before
3025  		 * reading any further.
3026  		 */
3027  		dma_rmb();
3028  		/* From the device's respective CQE format to qplib_wc*/
3029  		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
3030  		switch (type) {
3031  		case CQ_BASE_CQE_TYPE_REQ:
3032  			rc = bnxt_qplib_cq_process_req(cq,
3033  						       (struct cq_req *)hw_cqe,
3034  						       &cqe, &budget,
3035  						       cq->hwq.cons, lib_qp);
3036  			break;
3037  		case CQ_BASE_CQE_TYPE_RES_RC:
3038  			rc = bnxt_qplib_cq_process_res_rc(cq,
3039  							  (struct cq_res_rc *)
3040  							  hw_cqe, &cqe,
3041  							  &budget);
3042  			break;
3043  		case CQ_BASE_CQE_TYPE_RES_UD:
3044  			rc = bnxt_qplib_cq_process_res_ud
3045  					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
3046  					 &budget);
3047  			break;
3048  		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3049  			rc = bnxt_qplib_cq_process_res_raweth_qp1
3050  					(cq, (struct cq_res_raweth_qp1 *)
3051  					 hw_cqe, &cqe, &budget);
3052  			break;
3053  		case CQ_BASE_CQE_TYPE_TERMINAL:
3054  			rc = bnxt_qplib_cq_process_terminal
3055  					(cq, (struct cq_terminal *)hw_cqe,
3056  					 &cqe, &budget);
3057  			break;
3058  		case CQ_BASE_CQE_TYPE_CUT_OFF:
3059  			bnxt_qplib_cq_process_cutoff
3060  					(cq, (struct cq_cutoff *)hw_cqe);
3061  			/* Done processing this CQ */
3062  			goto exit;
3063  		default:
3064  			dev_err(&cq->hwq.pdev->dev,
3065  				"process_cq unknown type 0x%lx\n",
3066  				hw_cqe->cqe_type_toggle &
3067  				CQ_BASE_CQE_TYPE_MASK);
3068  			rc = -EINVAL;
3069  			break;
3070  		}
3071  		if (rc < 0) {
3072  			if (rc == -EAGAIN)
3073  				break;
3074  			/* Error while processing the CQE, just skip to the
3075  			 * next one
3076  			 */
3077  			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
3078  				dev_err(&cq->hwq.pdev->dev,
3079  					"process_cqe error rc = 0x%x\n", rc);
3080  		}
3081  		hw_polled++;
3082  		bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
3083  					 1, &cq->dbinfo.flags);
3084  
3085  	}
3086  	if (hw_polled)
3087  		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
3088  exit:
3089  	return num_cqes - budget;
3090  }
3091  
3092  void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
3093  {
3094  	if (arm_type)
3095  		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
3096  	/* Using cq->arm_state variable to track whether to issue cq handler */
3097  	atomic_set(&cq->arm_state, 1);
3098  }
3099  
3100  void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3101  {
3102  	flush_workqueue(qp->scq->nq->cqn_wq);
3103  	if (qp->scq != qp->rcq)
3104  		flush_workqueue(qp->rcq->nq->cqn_wq);
3105  }
3106