/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

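/*
 * The flush lists are protected by taking the SQ CQ's flush_lock first
 * and the RQ CQ's second. When the SQ and RQ share a single CQ, only
 * one lock is really taken; the sparse-only __acquire()/__release()
 * annotations keep the lock-annotation balance correct in that case.
 */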
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

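/*
 * Deferred CQ notification: re-fire the CQN handler from workqueue
 * context, but only while the CQ is still armed.
 */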
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

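/*
 * Drop any CQ-notification entries still queued on the NQ for @cq so a
 * CQ being destroyed is not signalled after the fact; each matching
 * entry is neutralized and counted in cq->cnq_events.
 */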
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

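/*
 * NQ tasklet handler: consume up to nq->budget valid NQEs, dispatch CQ
 * and SRQ notifications to the registered handlers, then ring the NQ
 * doorbell once for everything that was polled.
 */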
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u32 hw_polled = 0;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, cq))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq, srq, nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
}

/**
 * bnxt_re_synchronize_nq - self polling notification queue.
 * @nq: notification queue pointer
 *
 * Poll the given notification queue for all pending entries. This is
 * useful to synchronize notification entries while resources are going
 * away.
 */
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->dbinfo.flags = 0;
	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

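/*
 * Illustrative NQ lifecycle (a sketch, not taken from this file; the
 * caller-side variable names are hypothetical):
 *
 *	nq->hwq.max_elements = 0;	// let alloc pick the maximum
 *	rc = bnxt_qplib_alloc_nq(res, nq);
 *	rc = bnxt_qplib_enable_nq(pdev, nq, nq_idx, msix_vec, db_offt,
 *				  my_cqn_handler, my_srqn_handler);
 *	...
 *	bnxt_qplib_disable_nq(nq);	// stops IRQ, unmaps doorbell
 *	bnxt_qplib_free_nq(nq);
 */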
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;
	srq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	if (!srq->hwq.is_user) {
		srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
				   GFP_KERNEL);
		if (!srq->swq) {
			rc = -ENOMEM;
			goto fail;
		}
		for (idx = 0; idx < srq->hwq.max_elements; idx++)
			srq->swq[idx].next_idx = idx + 1;
		srq->swq[srq->last_idx].next_idx = -1;
	}

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}

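/*
 * Arm the SRQ immediately when enough entries are already available;
 * otherwise remember the request so the next post_srq_recv that takes
 * the queue above the threshold arms it.
 */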
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 count;

	count = __bnxt_qplib_get_avail(srq_hwq);
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 count = 0;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);

	spin_lock(&srq_hwq->lock);
	count = __bnxt_qplib_get_avail(srq_hwq);
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}

	return 0;
}

/* QP */

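/*
 * The software queue keeps per-WQE context (wr_id, psn, slots). Free
 * entries are chained through next_idx into a circular list so the
 * producer can grab the next slot in O(1).
 */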
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_sw_wqe - 1;
	for (indx = 0; indx < que->max_sw_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

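/*
 * The PSN/MSN search entries live in the "pad" area that follows the
 * SQ elements inside the same hwq allocation; record where that area
 * starts so producers can index it per WQE.
 */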
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);

		if (qp->is_host_msn_tbl) {
			psn_sz = sizeof(struct sq_msn_search);
			qp->msn = 0;
		}
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
				    : 0;
	/* Update msn tbl size */
	if (qp->is_host_msn_tbl && psn_sz) {
		if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
			hwq_attr.aux_depth =
				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
		else
			hwq_attr.aux_depth =
				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
		qp->msn_tbl_sz = hwq_attr.aux_depth;
		qp->msn = 0;
	}

	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	if (!sq->hwq.is_user) {
		rc = bnxt_qplib_alloc_init_swq(sq);
		if (rc)
			goto fail_sq;

		if (psn_sz)
			bnxt_qplib_init_psn_ptr(qp, psn_sz);
	}
	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		if (!rq->hwq.is_user) {
			rc = bnxt_qplib_alloc_init_swq(rq);
			if (rc)
				goto fail_rq;
		}

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	spin_lock_bh(&rcfw->tbl_lock);
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
	spin_unlock_bh(&rcfw->tbl_lock);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		      CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		      CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
	qp->port_id = le16_to_cpu(sb->port_id);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

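/*
 * Zero out the qp_handle of every still-valid CQE that belongs to the
 * given QP so stale completions are ignored once the QP is gone; only
 * a peek cursor is advanced, nothing is actually consumed here.
 */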
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	u32 peek_flags, peek_cons;
	struct cq_base *hw_cqe;
	int i;

	peek_flags = cq->dbinfo.flags;
	peek_cons = cq_hwq->cons;
	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
					 1, &peek_flags);
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	spin_lock_bh(&rcfw->tbl_lock);
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
	spin_unlock_bh(&rcfw->tbl_lock);

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		spin_lock_bh(&rcfw->tbl_lock);
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		spin_unlock_bh(&rcfw->tbl_lock);
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

/* Fill the MSN table into the next psn row */
static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_msn_search *msns;
	u32 start_psn, next_psn;
	u16 start_idx;

	msns = (struct sq_msn_search *)swq->psn_search;
	msns->start_idx_next_psn_start_psn = 0;

	start_psn = swq->start_psn;
	next_psn = swq->next_psn;
	start_idx = swq->slot_idx;
	msns->start_idx_next_psn_start_psn |=
		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
	qp->msn++;
	qp->msn %= qp->msn_tbl_sz;
}

static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	/* Handle MSN differently on cap flags */
	if (qp->is_host_msn_tbl) {
		bnxt_qplib_fill_msn_search(qp, wqe, swq);
		return;
	}
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

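/*
 * Copy inline payload directly into the SQ ring: data is packed into
 * consecutive 16-byte producer slots (sizeof(struct sq_sge) each),
 * pulling a fresh slot whenever the previous one fills up. Returns the
 * total inline length or -ENOMEM if it exceeds qp->max_inline_data.
 */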
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
				      (sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}

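/*
 * WQE sizing: one slot is 16 bytes (one struct sq_sge), so a WQE takes
 * its header slots plus one slot per SGE (or the 16-byte aligned
 * inline length). In static WQE mode every WQE occupies a fixed 8
 * slots (128 bytes) on the ring, while *wqe_sz still reports the real
 * size.
 */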
static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Using sq_send_hdr is a slight misnomer: the header size is the
	 * same for the RQ as well.
	 */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}

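/*
 * Locate the PSN/MSN search buffer for this WQE inside the hwq pad
 * pages. With HW retransmission the MSN table is indexed by qp->msn
 * (modulo table size) instead of the WQE's slot position.
 */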
static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq, bool hw_retx)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	if (hw_retx) {
		/* For HW retx use qp msn index */
		tail = qp->msn;
		tail %= qp->msn_tbl_sz;
	}
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}

1786 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1787 {
1788 struct bnxt_qplib_q *sq = &qp->sq;
1789
1790 bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1791 }
1792
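/*
 * Build and queue one send WQE. A caller typically posts one or more
 * WQEs and then rings the SQ doorbell once; a minimal sketch, assuming
 * a fully populated wqe (the real caller-side setup lives in the
 * ib_verbs layer):
 *
 *	rc = bnxt_qplib_post_send(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_send_db(qp);
 */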
1793 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1794 struct bnxt_qplib_swqe *wqe)
1795 {
1796 struct bnxt_qplib_nq_work *nq_work = NULL;
1797 int i, rc = 0, data_len = 0, pkt_num = 0;
1798 struct bnxt_qplib_q *sq = &qp->sq;
1799 struct bnxt_qplib_hwq *hwq;
1800 struct bnxt_qplib_swq *swq;
1801 bool sch_handler = false;
1802 u16 wqe_sz, qdf = 0;
1803 bool msn_update;
1804 void *base_hdr;
1805 void *ext_hdr;
1806 __le32 temp32;
1807 u32 wqe_idx;
1808 u32 slots;
1809 u16 idx;
1810
1811 hwq = &sq->hwq;
1812 if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1813 qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1814 dev_err(&hwq->pdev->dev,
1815 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
1816 qp->id, qp->state);
1817 rc = -EINVAL;
1818 goto done;
1819 }
1820
1821 slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1822 if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1823 dev_err(&hwq->pdev->dev,
1824 "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1825 hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1826 rc = -ENOMEM;
1827 goto done;
1828 }
1829
1830 swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1831 bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
1832
1833 idx = 0;
1834 swq->slot_idx = hwq->prod;
1835 swq->slots = slots;
1836 swq->wr_id = wqe->wr_id;
1837 swq->type = wqe->type;
1838 swq->flags = wqe->flags;
1839 swq->start_psn = sq->psn & BTH_PSN_MASK;
1840 if (qp->sig_type)
1841 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1842
1843 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1844 sch_handler = true;
1845 dev_dbg(&hwq->pdev->dev,
1846 "%s Error QP. Scheduling for poll_cq\n", __func__);
1847 goto queue_err;
1848 }
1849
1850 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1851 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1852 memset(base_hdr, 0, sizeof(struct sq_sge));
1853 memset(ext_hdr, 0, sizeof(struct sq_sge));
1854
1855 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1856 /* Copy the inline data */
1857 data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1858 else
1859 data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1860 &idx);
1861 if (data_len < 0) {
1862 rc = data_len;
goto queue_err;
}
/* Make sure we update the MSN table only for wqes that go on the wire */
1864 msn_update = true;
1865 /* Specifics */
1866 switch (wqe->type) {
1867 case BNXT_QPLIB_SWQE_TYPE_SEND:
1868 if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1869 struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1870 struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
1871 /* Assemble info for Raw Ethertype QPs */
1872
1873 sqe->wqe_type = wqe->type;
1874 sqe->flags = wqe->flags;
1875 sqe->wqe_size = wqe_sz;
1876 sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1877 sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1878 sqe->length = cpu_to_le32(data_len);
1879 ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1880 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1881 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1882
1883 break;
1884 }
1885 fallthrough;
1886 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1887 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1888 {
1889 struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1890 struct sq_send_hdr *sqe = base_hdr;
1891
1892 sqe->wqe_type = wqe->type;
1893 sqe->flags = wqe->flags;
1894 sqe->wqe_size = wqe_sz;
1895 sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1896 if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1897 qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1898 sqe->q_key = cpu_to_le32(wqe->send.q_key);
1899 sqe->length = cpu_to_le32(data_len);
1900 sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1901 ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1902 SQ_SEND_DST_QP_MASK);
1903 ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1904 SQ_SEND_AVID_MASK);
1905 msn_update = false;
1906 } else {
1907 sqe->length = cpu_to_le32(data_len);
1908 if (qp->mtu)
1909 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1910 if (!pkt_num)
1911 pkt_num = 1;
1912 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1913 }
1914 break;
1915 }
1916 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1917 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1918 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1919 {
1920 struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
1921 struct sq_rdma_hdr *sqe = base_hdr;
1922
1923 sqe->wqe_type = wqe->type;
1924 sqe->flags = wqe->flags;
1925 sqe->wqe_size = wqe_sz;
1926 sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1927 sqe->length = cpu_to_le32((u32)data_len);
1928 ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1929 ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1930 if (qp->mtu)
1931 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1932 if (!pkt_num)
1933 pkt_num = 1;
1934 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1935 break;
1936 }
1937 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1938 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1939 {
1940 struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
1941 struct sq_atomic_hdr *sqe = base_hdr;
1942
1943 sqe->wqe_type = wqe->type;
1944 sqe->flags = wqe->flags;
1945 sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1946 sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1947 ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1948 ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1949 if (qp->mtu)
1950 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1951 if (!pkt_num)
1952 pkt_num = 1;
1953 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1954 break;
1955 }
1956 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1957 {
1958 struct sq_localinvalidate *sqe = base_hdr;
1959
1960 sqe->wqe_type = wqe->type;
1961 sqe->flags = wqe->flags;
1962 sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1963 msn_update = false;
1964 break;
1965 }
1966 case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1967 {
1968 struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
1969 struct sq_fr_pmr_hdr *sqe = base_hdr;
1970
1971 sqe->wqe_type = wqe->type;
1972 sqe->flags = wqe->flags;
1973 sqe->access_cntl = wqe->frmr.access_cntl |
1974 SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1975 sqe->zero_based_page_size_log =
1976 (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1977 SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1978 (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1979 sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1980 temp32 = cpu_to_le32(wqe->frmr.length);
1981 memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1982 sqe->numlevels_pbl_page_size_log =
1983 ((wqe->frmr.pbl_pg_sz_log <<
1984 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1985 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1986 ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1987 SQ_FR_PMR_NUMLEVELS_MASK);
1988
1989 for (i = 0; i < wqe->frmr.page_list_len; i++)
1990 wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1991 wqe->frmr.page_list[i] |
1992 PTU_PTE_VALID);
1993 ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1994 ext_sqe->va = cpu_to_le64(wqe->frmr.va);
1995 msn_update = false;
1996
1997 break;
1998 }
1999 case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
2000 {
2001 struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
2002 struct sq_bind_hdr *sqe = base_hdr;
2003
2004 sqe->wqe_type = wqe->type;
2005 sqe->flags = wqe->flags;
2006 sqe->access_cntl = wqe->bind.access_cntl;
2007 sqe->mw_type_zero_based = wqe->bind.mw_type |
2008 (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
2009 sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
2010 sqe->l_key = cpu_to_le32(wqe->bind.r_key);
2011 ext_sqe->va = cpu_to_le64(wqe->bind.va);
2012 ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
2013 msn_update = false;
2014 break;
2015 }
2016 default:
2017 /* Bad wqe, return error */
2018 rc = -EINVAL;
2019 goto done;
2020 }
2021 if (!qp->is_host_msn_tbl || msn_update) {
2022 swq->next_psn = sq->psn & BTH_PSN_MASK;
2023 bnxt_qplib_fill_psn_search(qp, wqe, swq);
2024 }
2025 queue_err:
2026 bnxt_qplib_swq_mod_start(sq, wqe_idx);
2027 bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
2028 qp->wqe_cnt++;
2029 done:
2030 if (sch_handler) {
2031 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2032 if (nq_work) {
2033 nq_work->cq = qp->scq;
2034 nq_work->nq = qp->scq->nq;
2035 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2036 queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
2037 } else {
2038 dev_err(&hwq->pdev->dev,
2039 "FP: Failed to allocate SQ nq_work!\n");
2040 rc = -ENOMEM;
2041 }
2042 }
2043 return rc;
2044 }
2045
2046 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
2047 {
2048 struct bnxt_qplib_q *rq = &qp->rq;
2049
2050 bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
2051 }
2052
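/*
 * Queue one receive WQE. RQ WQEs always consume a fixed number of
 * slots (rq->dbinfo.max_slot); a zero-SGE request still consumes one
 * zero-length SGE. As with the SQ, a caller would post and then ring
 * the RQ doorbell, e.g.:
 *
 *	rc = bnxt_qplib_post_recv(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_recv_db(qp);
 */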
2053 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
2054 struct bnxt_qplib_swqe *wqe)
2055 {
2056 struct bnxt_qplib_nq_work *nq_work = NULL;
2057 struct bnxt_qplib_q *rq = &qp->rq;
2058 struct rq_wqe_hdr *base_hdr;
2059 struct rq_ext_hdr *ext_hdr;
2060 struct bnxt_qplib_hwq *hwq;
2061 struct bnxt_qplib_swq *swq;
2062 bool sch_handler = false;
2063 u16 wqe_sz, idx;
2064 u32 wqe_idx;
2065 int rc = 0;
2066
2067 hwq = &rq->hwq;
2068 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2069 dev_err(&hwq->pdev->dev,
2070 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
2071 qp->id, qp->state);
2072 rc = -EINVAL;
2073 goto done;
2074 }
2075
2076 if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
2077 dev_err(&hwq->pdev->dev,
2078 "FP: QP (0x%x) RQ is full!\n", qp->id);
2079 rc = -EINVAL;
2080 goto done;
2081 }
2082
2083 swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
2084 swq->wr_id = wqe->wr_id;
2085 swq->slots = rq->dbinfo.max_slot;
2086
2087 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2088 sch_handler = true;
2089 dev_dbg(&hwq->pdev->dev,
2090 "%s: Error QP. Scheduling for poll_cq\n", __func__);
2091 goto queue_err;
2092 }
2093
2094 idx = 0;
2095 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2096 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2097 memset(base_hdr, 0, sizeof(struct sq_sge));
2098 memset(ext_hdr, 0, sizeof(struct sq_sge));
2099 wqe_sz = (sizeof(struct rq_wqe_hdr) +
2100 wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2101 bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2102 if (!wqe->num_sge) {
2103 struct sq_sge *sge;
2104
2105 sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2106 sge->size = 0;
2107 wqe_sz++;
2108 }
2109 base_hdr->wqe_type = wqe->type;
2110 base_hdr->flags = wqe->flags;
2111 base_hdr->wqe_size = wqe_sz;
2112 base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2113 queue_err:
2114 bnxt_qplib_swq_mod_start(rq, wqe_idx);
2115 bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
2116 done:
2117 if (sch_handler) {
2118 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2119 if (nq_work) {
2120 nq_work->cq = qp->rcq;
2121 nq_work->nq = qp->rcq->nq;
2122 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2123 queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2124 } else {
2125 dev_err(&hwq->pdev->dev,
2126 "FP: Failed to allocate RQ nq_work!\n");
2127 rc = -ENOMEM;
2128 }
2129 }
2130
2131 return rc;
2132 }
2133
2134 /* CQ */
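/*
 * Allocate the CQ's HW queue, issue CMDQ_CREATE_CQ over the RCFW
 * channel and set up the doorbell context. On success the ARMENA
 * doorbell is rung so the NQ can start reporting events for this CQ.
 */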
2135 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2136 {
2137 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2138 struct bnxt_qplib_hwq_attr hwq_attr = {};
2139 struct creq_create_cq_resp resp = {};
2140 struct bnxt_qplib_cmdqmsg msg = {};
2141 struct cmdq_create_cq req = {};
2142 struct bnxt_qplib_pbl *pbl;
2143 u32 pg_sz_lvl;
2144 int rc;
2145
2146 if (!cq->dpi) {
2147 dev_err(&rcfw->pdev->dev,
2148 "FP: CREATE_CQ failed due to NULL DPI\n");
2149 return -EINVAL;
2150 }
2151
2152 cq->dbinfo.flags = 0;
2153 hwq_attr.res = res;
2154 hwq_attr.depth = cq->max_wqe;
2155 hwq_attr.stride = sizeof(struct cq_base);
2156 hwq_attr.type = HWQ_TYPE_QUEUE;
2157 hwq_attr.sginfo = &cq->sg_info;
2158 rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2159 if (rc)
2160 return rc;
2161
2162 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2163 CMDQ_BASE_OPCODE_CREATE_CQ,
2164 sizeof(req));
2165
2166 req.dpi = cpu_to_le32(cq->dpi->dpi);
2167 req.cq_handle = cpu_to_le64(cq->cq_handle);
2168 req.cq_size = cpu_to_le32(cq->max_wqe);
2169 pbl = &cq->hwq.pbl[PBL_LVL_0];
2170 pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2171 CMDQ_CREATE_CQ_PG_SIZE_SFT);
2172 pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2173 req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2174 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2175 req.cq_fco_cnq_id = cpu_to_le32(
2176 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2177 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2178 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2179 sizeof(resp), 0);
2180 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2181 if (rc)
2182 goto fail;
2183
2184 cq->id = le32_to_cpu(resp.xid);
2185 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2186 init_waitqueue_head(&cq->waitq);
2187 INIT_LIST_HEAD(&cq->sqf_head);
2188 INIT_LIST_HEAD(&cq->rqf_head);
2189 spin_lock_init(&cq->compl_lock);
2190 spin_lock_init(&cq->flush_lock);
2191
2192 cq->dbinfo.hwq = &cq->hwq;
2193 cq->dbinfo.xid = cq->id;
2194 cq->dbinfo.db = cq->dpi->dbr;
2195 cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2196
2197 bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2198
2199 return 0;
2200
2201 fail:
2202 bnxt_qplib_free_hwq(res, &cq->hwq);
2203 return rc;
2204 }
2205
2206 void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2207 struct bnxt_qplib_cq *cq)
2208 {
2209 bnxt_qplib_free_hwq(res, &cq->hwq);
2210 memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
2211 /* Reset only the cons bit in the flags */
2212 cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
2213 }
2214
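/*
 * Resizing is a two-step operation: allocate the replacement HW queue
 * and issue CMDQ_RESIZE_CQ here; once firmware signals completion via
 * a cutoff CQE, bnxt_qplib_resize_cq_complete() above swaps the new
 * queue in.
 */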
2215 int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2216 int new_cqes)
2217 {
2218 struct bnxt_qplib_hwq_attr hwq_attr = {};
2219 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2220 struct creq_resize_cq_resp resp = {};
2221 struct bnxt_qplib_cmdqmsg msg = {};
2222 struct cmdq_resize_cq req = {};
2223 struct bnxt_qplib_pbl *pbl;
2224 u32 pg_sz, lvl, new_sz;
2225 int rc;
2226
2227 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2228 CMDQ_BASE_OPCODE_RESIZE_CQ,
2229 sizeof(req));
2230 hwq_attr.sginfo = &cq->sg_info;
2231 hwq_attr.res = res;
2232 hwq_attr.depth = new_cqes;
2233 hwq_attr.stride = sizeof(struct cq_base);
2234 hwq_attr.type = HWQ_TYPE_QUEUE;
2235 rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2236 if (rc)
2237 return rc;
2238
2239 req.cq_cid = cpu_to_le32(cq->id);
2240 pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2241 pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
2242 lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2243 CMDQ_RESIZE_CQ_LVL_MASK;
2244 new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2245 CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2246 req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
2247 req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2248
2249 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2250 sizeof(resp), 0);
2251 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2252 return rc;
2253 }
2254
2255 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2256 {
2257 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2258 struct creq_destroy_cq_resp resp = {};
2259 struct bnxt_qplib_cmdqmsg msg = {};
2260 struct cmdq_destroy_cq req = {};
2261 u16 total_cnq_events;
2262 int rc;
2263
2264 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2265 CMDQ_BASE_OPCODE_DESTROY_CQ,
2266 sizeof(req));
2267
2268 req.cq_cid = cpu_to_le32(cq->id);
2269 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2270 sizeof(resp), 0);
2271 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2272 if (rc)
2273 return rc;
2274 total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2275 __wait_for_all_nqes(cq, total_cnq_events);
2276 bnxt_qplib_free_hwq(res, &cq->hwq);
2277 return 0;
2278 }
2279
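/*
 * Fabricate FLUSHED_ERR completions for every outstanding SQ WQE
 * between swq_start and swq_last, skipping fence WQEs. Returns
 * -EAGAIN when the caller's CQE budget runs out before the queue is
 * drained.
 */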
2280 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2281 struct bnxt_qplib_cqe **pcqe, int *budget)
2282 {
2283 struct bnxt_qplib_cqe *cqe;
2284 u32 start, last;
2285 int rc = 0;
2286
2287 /* Now complete all outstanding SQEs with FLUSHED_ERR */
2288 start = sq->swq_start;
2289 cqe = *pcqe;
2290 while (*budget) {
2291 last = sq->swq_last;
2292 if (start == last)
2293 break;
2294 /* Skip the FENCE WQE completions */
2295 if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2296 bnxt_qplib_cancel_phantom_processing(qp);
2297 goto skip_compl;
2298 }
2299 memset(cqe, 0, sizeof(*cqe));
2300 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2301 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2302 cqe->qp_handle = (u64)(unsigned long)qp;
2303 cqe->wr_id = sq->swq[last].wr_id;
2304 cqe->src_qp = qp->id;
2305 cqe->type = sq->swq[last].type;
2306 cqe++;
2307 (*budget)--;
2308 skip_compl:
2309 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2310 sq->swq[last].slots, &sq->dbinfo.flags);
2311 sq->swq_last = sq->swq[last].next_idx;
2312 }
2313 *pcqe = cqe;
2314 if (!(*budget) && sq->swq_last != start)
2315 /* Out of budget */
2316 rc = -EAGAIN;
2317
2318 return rc;
2319 }
2320
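/*
 * Fabricate FLUSHED_ERR completions for every outstanding RQ WQE,
 * choosing the RES CQE opcode from the QP type. Like __flush_sq(),
 * returns -EAGAIN if the budget is exhausted first.
 */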
2321 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2322 struct bnxt_qplib_cqe **pcqe, int *budget)
2323 {
2324 struct bnxt_qplib_cqe *cqe;
2325 u32 start, last;
2326 int opcode = 0;
2327 int rc = 0;
2328
2329 switch (qp->type) {
2330 case CMDQ_CREATE_QP1_TYPE_GSI:
2331 opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2332 break;
2333 case CMDQ_CREATE_QP_TYPE_RC:
2334 opcode = CQ_BASE_CQE_TYPE_RES_RC;
2335 break;
2336 case CMDQ_CREATE_QP_TYPE_UD:
2337 case CMDQ_CREATE_QP_TYPE_GSI:
2338 opcode = CQ_BASE_CQE_TYPE_RES_UD;
2339 break;
2340 }
2341
2342 /* Flush the rest of the RQ */
2343 start = rq->swq_start;
2344 cqe = *pcqe;
2345 while (*budget) {
2346 last = rq->swq_last;
2347 if (last == start)
2348 break;
2349 memset(cqe, 0, sizeof(*cqe));
2350 cqe->status =
2351 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2352 cqe->opcode = opcode;
2353 cqe->qp_handle = (unsigned long)qp;
2354 cqe->wr_id = rq->swq[last].wr_id;
2355 cqe++;
2356 (*budget)--;
2357 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2358 rq->swq[last].slots, &rq->dbinfo.flags);
2359 rq->swq_last = rq->swq[last].next_idx;
2360 }
2361 *pcqe = cqe;
2362 if (!*budget && rq->swq_last != start)
2363 /* Out of budget */
2364 rc = -EAGAIN;
2365
2366 return rc;
2367 }
2368
2369 void bnxt_qplib_mark_qp_error(void *qp_handle)
2370 {
2371 struct bnxt_qplib_qp *qp = qp_handle;
2372
2373 if (!qp)
2374 return;
2375
2376 /* Must block new posting of SQ and RQ */
2377 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2378 bnxt_qplib_cancel_phantom_processing(qp);
2379 }
2380
2381 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2382 * CQEs are tracked from sw_cq_cons to max_elements, but are valid only if VALID=1
2383 */
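/*
 * Workaround for HW bug 9060: the phantom (fence) WQE's completion may
 * be coalesced away. If the PSN-search entry for this WQE is still
 * marked, stop completing, re-arm the CQ with ARMALL, and then peek
 * ahead in the CQ until the phantom REQ CQE shows up.
 */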
2384 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2385 u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2386 {
2387 u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
2388 struct bnxt_qplib_q *sq = &qp->sq;
2389 struct cq_req *peek_req_hwcqe;
2390 struct bnxt_qplib_qp *peek_qp;
2391 struct bnxt_qplib_q *peek_sq;
2392 struct bnxt_qplib_swq *swq;
2393 struct cq_base *peek_hwcqe;
2394 int i, rc = 0;
2395
2396 /* Normal mode */
2397 /* Check for the psn_search marking before completing */
2398 swq = &sq->swq[swq_last];
2399 if (swq->psn_search &&
2400 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2401 /* Unmark */
2402 swq->psn_search->flags_next_psn = cpu_to_le32
2403 (le32_to_cpu(swq->psn_search->flags_next_psn)
2404 & ~0x80000000);
2405 dev_dbg(&cq->hwq.pdev->dev,
2406 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2407 cq_cons, qp->id, swq_last, cqe_sq_cons);
2408 sq->condition = true;
2409 sq->send_phantom = true;
2410
2411 /* TODO: Only ARM if the previous SQE is ARMALL */
2412 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2413 rc = -EAGAIN;
2414 goto out;
2415 }
2416 if (sq->condition) {
2417 /* Peek at the completions */
2418 peek_flags = cq->dbinfo.flags;
2419 peek_sw_cq_cons = cq_cons;
2420 i = cq->hwq.max_elements;
2421 while (i--) {
2422 peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2423 peek_sw_cq_cons, NULL);
2424 /* If the next hwcqe is VALID */
2425 if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
2426 /*
2427 * The valid test of the entry must be done before
2428 * reading any further.
2429 */
2430 dma_rmb();
2431 /* If the next hwcqe is a REQ */
2432 if ((peek_hwcqe->cqe_type_toggle &
2433 CQ_BASE_CQE_TYPE_MASK) ==
2434 CQ_BASE_CQE_TYPE_REQ) {
2435 peek_req_hwcqe = (struct cq_req *)
2436 peek_hwcqe;
2437 peek_qp = (struct bnxt_qplib_qp *)
2438 ((unsigned long)
2439 le64_to_cpu
2440 (peek_req_hwcqe->qp_handle));
2441 peek_sq = &peek_qp->sq;
2442 peek_sq_cons_idx =
2443 ((le16_to_cpu(
2444 peek_req_hwcqe->sq_cons_idx)
2445 - 1) % sq->max_wqe);
2446 /* If the hwcqe's sq's wr_id matches */
2447 if (peek_sq == sq &&
2448 sq->swq[peek_sq_cons_idx].wr_id ==
2449 BNXT_QPLIB_FENCE_WRID) {
2450 /*
2451 * Unbreak only if the phantom
2452 * comes back
2453 */
2454 dev_dbg(&cq->hwq.pdev->dev,
2455 "FP: Got Phantom CQE\n");
2456 sq->condition = false;
2457 sq->single = true;
2458 rc = 0;
2459 goto out;
2460 }
2461 }
2462 /* Valid but not the phantom, so keep looping */
2463 } else {
2464 /* Not valid yet, just exit and wait */
2465 rc = -EINVAL;
2466 goto out;
2467 }
2468 bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
2469 &peek_sw_cq_cons,
2470 1, &peek_flags);
2471 }
2472 dev_err(&cq->hwq.pdev->dev,
2473 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2474 cq_cons, qp->id, swq_last, cqe_sq_cons);
2475 rc = -EINVAL;
2476 }
2477 out:
2478 return rc;
2479 }
2480
2481 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2482 struct cq_req *hwcqe,
2483 struct bnxt_qplib_cqe **pcqe, int *budget,
2484 u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2485 {
2486 struct bnxt_qplib_swq *swq;
2487 struct bnxt_qplib_cqe *cqe;
2488 struct bnxt_qplib_qp *qp;
2489 struct bnxt_qplib_q *sq;
2490 u32 cqe_sq_cons;
2491 int rc = 0;
2492
2493 qp = (struct bnxt_qplib_qp *)((unsigned long)
2494 le64_to_cpu(hwcqe->qp_handle));
2495 if (!qp) {
2496 dev_err(&cq->hwq.pdev->dev,
2497 "FP: Process Req qp is NULL\n");
2498 return -EINVAL;
2499 }
2500 sq = &qp->sq;
2501
2502 cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
2503 if (qp->sq.flushed) {
2504 dev_dbg(&cq->hwq.pdev->dev,
2505 "%s: QP in Flush QP = %p\n", __func__, qp);
2506 goto done;
2507 }
2508 /* We must walk the sq's swq to fabricate CQEs for all previously
2509 * signaled SWQEs due to CQE aggregation, from the current sq cons
2510 * to the cqe_sq_cons
2511 */
2512 cqe = *pcqe;
2513 while (*budget) {
2514 if (sq->swq_last == cqe_sq_cons)
2515 /* Done */
2516 break;
2517
2518 swq = &sq->swq[sq->swq_last];
2519 memset(cqe, 0, sizeof(*cqe));
2520 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2521 cqe->qp_handle = (u64)(unsigned long)qp;
2522 cqe->src_qp = qp->id;
2523 cqe->wr_id = swq->wr_id;
2524 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2525 goto skip;
2526 cqe->type = swq->type;
2527
2528 /* For the last CQE, check for status. For errors, regardless
2529 * of the request being signaled or not, it must complete with
2530 * the hwcqe error status
2531 */
2532 if (swq->next_idx == cqe_sq_cons &&
2533 hwcqe->status != CQ_REQ_STATUS_OK) {
2534 cqe->status = hwcqe->status;
2535 dev_err(&cq->hwq.pdev->dev,
2536 "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2537 sq->swq_last, cqe->wr_id, cqe->status);
2538 cqe++;
2539 (*budget)--;
2540 bnxt_qplib_mark_qp_error(qp);
2541 /* Add qp to flush list of the CQ */
2542 bnxt_qplib_add_flush_qp(qp);
2543 } else {
2544 /* Before we complete, do WA 9060 */
2545 if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
2546 if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2547 cqe_sq_cons)) {
2548 *lib_qp = qp;
2549 goto out;
2550 }
2551 }
2552 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2553 cqe->status = CQ_REQ_STATUS_OK;
2554 cqe++;
2555 (*budget)--;
2556 }
2557 }
2558 skip:
2559 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2560 swq->slots, &sq->dbinfo.flags);
2561 sq->swq_last = swq->next_idx;
2562 if (sq->single)
2563 break;
2564 }
2565 out:
2566 *pcqe = cqe;
2567 if (sq->swq_last != cqe_sq_cons) {
2568 /* Out of budget */
2569 rc = -EAGAIN;
2570 goto done;
2571 }
2572 /*
2573 * Back to normal completion mode only after it has completed all of
2574 * the WC for this CQE
2575 */
2576 sq->single = false;
2577 done:
2578 return rc;
2579 }
2580
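/*
 * Return a consumed SRQ element to the free list by linking the
 * released tag at the tail and bumping the SRQ consumer index.
 */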
2581 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2582 {
2583 spin_lock(&srq->hwq.lock);
2584 srq->swq[srq->last_idx].next_idx = (int)tag;
2585 srq->last_idx = (int)tag;
2586 srq->swq[srq->last_idx].next_idx = -1;
2587 bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
2588 srq->dbinfo.max_slot, &srq->dbinfo.flags);
2589 spin_unlock(&srq->hwq.lock);
2590 }
2591
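/*
 * Consume one RC receive completion: populate the qplib CQE from the
 * hardware CQE, retire the matching SRQ or RQ entry, and on a bad
 * status move the QP to the error state and queue it for flushing.
 */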
2592 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2593 struct cq_res_rc *hwcqe,
2594 struct bnxt_qplib_cqe **pcqe,
2595 int *budget)
2596 {
2597 struct bnxt_qplib_srq *srq;
2598 struct bnxt_qplib_cqe *cqe;
2599 struct bnxt_qplib_qp *qp;
2600 struct bnxt_qplib_q *rq;
2601 u32 wr_id_idx;
2602
2603 qp = (struct bnxt_qplib_qp *)((unsigned long)
2604 le64_to_cpu(hwcqe->qp_handle));
2605 if (!qp) {
2606 dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2607 return -EINVAL;
2608 }
2609 if (qp->rq.flushed) {
2610 dev_dbg(&cq->hwq.pdev->dev,
2611 "%s: QP in Flush QP = %p\n", __func__, qp);
2612 return 0;
2613 }
2614
2615 cqe = *pcqe;
2616 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2617 cqe->length = le32_to_cpu(hwcqe->length);
2618 cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2619 cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2620 cqe->flags = le16_to_cpu(hwcqe->flags);
2621 cqe->status = hwcqe->status;
2622 cqe->qp_handle = (u64)(unsigned long)qp;
2623
2624 wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2625 CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2626 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2627 srq = qp->srq;
2628 if (!srq)
2629 return -EINVAL;
2630 if (wr_id_idx >= srq->hwq.max_elements) {
2631 dev_err(&cq->hwq.pdev->dev,
2632 "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2633 wr_id_idx, srq->hwq.max_elements);
2634 return -EINVAL;
2635 }
2636 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2637 bnxt_qplib_release_srqe(srq, wr_id_idx);
2638 cqe++;
2639 (*budget)--;
2640 *pcqe = cqe;
2641 } else {
2642 struct bnxt_qplib_swq *swq;
2643
2644 rq = &qp->rq;
2645 if (wr_id_idx > (rq->max_wqe - 1)) {
2646 dev_err(&cq->hwq.pdev->dev,
2647 "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2648 wr_id_idx, rq->max_wqe);
2649 return -EINVAL;
2650 }
2651 if (wr_id_idx != rq->swq_last)
2652 return -EINVAL;
2653 swq = &rq->swq[rq->swq_last];
2654 cqe->wr_id = swq->wr_id;
2655 cqe++;
2656 (*budget)--;
2657 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2658 swq->slots, &rq->dbinfo.flags);
2659 rq->swq_last = swq->next_idx;
2660 *pcqe = cqe;
2661
2662 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2663 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2664 /* Add qp to flush list of the CQ */
2665 bnxt_qplib_add_flush_qp(qp);
2666 }
2667 }
2668
2669 return 0;
2670 }
2671
2672 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2673 struct cq_res_ud *hwcqe,
2674 struct bnxt_qplib_cqe **pcqe,
2675 int *budget)
2676 {
2677 struct bnxt_qplib_srq *srq;
2678 struct bnxt_qplib_cqe *cqe;
2679 struct bnxt_qplib_qp *qp;
2680 struct bnxt_qplib_q *rq;
2681 u32 wr_id_idx;
2682
2683 qp = (struct bnxt_qplib_qp *)((unsigned long)
2684 le64_to_cpu(hwcqe->qp_handle));
2685 if (!qp) {
2686 dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2687 return -EINVAL;
2688 }
2689 if (qp->rq.flushed) {
2690 dev_dbg(&cq->hwq.pdev->dev,
2691 "%s: QP in Flush QP = %p\n", __func__, qp);
2692 return 0;
2693 }
2694 cqe = *pcqe;
2695 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2696 cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2697 cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2698 cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2699 cqe->flags = le16_to_cpu(hwcqe->flags);
2700 cqe->status = hwcqe->status;
2701 cqe->qp_handle = (u64)(unsigned long)qp;
2702 /* FIXME: Endianness fix needed for smac */
2703 memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2704 wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2705 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2706 cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2707 ((le32_to_cpu(
2708 hwcqe->src_qp_high_srq_or_rq_wr_id) &
2709 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2710
2711 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2712 srq = qp->srq;
2713 if (!srq)
2714 return -EINVAL;
2715
2716 if (wr_id_idx >= srq->hwq.max_elements) {
2717 dev_err(&cq->hwq.pdev->dev,
2718 "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2719 wr_id_idx, srq->hwq.max_elements);
2720 return -EINVAL;
2721 }
2722 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2723 bnxt_qplib_release_srqe(srq, wr_id_idx);
2724 cqe++;
2725 (*budget)--;
2726 *pcqe = cqe;
2727 } else {
2728 struct bnxt_qplib_swq *swq;
2729
2730 rq = &qp->rq;
2731 if (wr_id_idx > (rq->max_wqe - 1)) {
2732 dev_err(&cq->hwq.pdev->dev,
2733 "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2734 wr_id_idx, rq->max_wqe);
2735 return -EINVAL;
2736 }
2737
2738 if (rq->swq_last != wr_id_idx)
2739 return -EINVAL;
2740 swq = &rq->swq[rq->swq_last];
2741 cqe->wr_id = swq->wr_id;
2742 cqe++;
2743 (*budget)--;
2744 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2745 swq->slots, &rq->dbinfo.flags);
2746 rq->swq_last = swq->next_idx;
2747 *pcqe = cqe;
2748
2749 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2750 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2751 /* Add qp to flush list of the CQ */
2752 bnxt_qplib_add_flush_qp(qp);
2753 }
2754 }
2755
2756 return 0;
2757 }
2758
2759 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2760 {
2761 struct cq_base *hw_cqe;
2762 bool rc = true;
2763
2764 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
2765 /* Check for Valid bit. If the CQE is valid, return false */
2766 rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
2767 return rc;
2768 }
2769
2770 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2771 struct cq_res_raweth_qp1 *hwcqe,
2772 struct bnxt_qplib_cqe **pcqe,
2773 int *budget)
2774 {
2775 struct bnxt_qplib_qp *qp;
2776 struct bnxt_qplib_q *rq;
2777 struct bnxt_qplib_srq *srq;
2778 struct bnxt_qplib_cqe *cqe;
2779 u32 wr_id_idx;
2780
2781 qp = (struct bnxt_qplib_qp *)((unsigned long)
2782 le64_to_cpu(hwcqe->qp_handle));
2783 if (!qp) {
2784 dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2785 return -EINVAL;
2786 }
2787 if (qp->rq.flushed) {
2788 dev_dbg(&cq->hwq.pdev->dev,
2789 "%s: QP in Flush QP = %p\n", __func__, qp);
2790 return 0;
2791 }
2792 cqe = *pcqe;
2793 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2794 cqe->flags = le16_to_cpu(hwcqe->flags);
2795 cqe->qp_handle = (u64)(unsigned long)qp;
2796
2797 wr_id_idx =
2798 le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2799 & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2800 cqe->src_qp = qp->id;
2801 if (qp->id == 1 && !cqe->length) {
2802 /* Add workaround for the length misdetection */
2803 cqe->length = 296;
2804 } else {
2805 cqe->length = le16_to_cpu(hwcqe->length);
2806 }
2807 cqe->pkey_index = qp->pkey_index;
2808 memcpy(cqe->smac, qp->smac, 6);
2809
2810 cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2811 cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2812 cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2813
2814 if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2815 srq = qp->srq;
2816 if (!srq) {
2817 dev_err(&cq->hwq.pdev->dev,
2818 "FP: SRQ used but not defined??\n");
2819 return -EINVAL;
2820 }
2821 if (wr_id_idx >= srq->hwq.max_elements) {
2822 dev_err(&cq->hwq.pdev->dev,
2823 "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2824 wr_id_idx, srq->hwq.max_elements);
2825 return -EINVAL;
2826 }
2827 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2828 bnxt_qplib_release_srqe(srq, wr_id_idx);
2829 cqe++;
2830 (*budget)--;
2831 *pcqe = cqe;
2832 } else {
2833 struct bnxt_qplib_swq *swq;
2834
2835 rq = &qp->rq;
2836 if (wr_id_idx > (rq->max_wqe - 1)) {
2837 dev_err(&cq->hwq.pdev->dev,
2838 "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2839 wr_id_idx, rq->max_wqe);
2840 return -EINVAL;
2841 }
2842 if (rq->swq_last != wr_id_idx)
2843 return -EINVAL;
2844 swq = &rq->swq[rq->swq_last];
2845 cqe->wr_id = swq->wr_id;
2846 cqe++;
2847 (*budget)--;
2848 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2849 swq->slots, &rq->dbinfo.flags);
2850 rq->swq_last = swq->next_idx;
2851 *pcqe = cqe;
2852
2853 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2854 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2855 /* Add qp to flush list of the CQ */
2856 bnxt_qplib_add_flush_qp(qp);
2857 }
2858 }
2859
2860 return 0;
2861 }
2862
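/*
 * A terminal CQE marks the QP as errored: complete any aggregated
 * successful SQ CQEs up to the reported sq_cons_idx, then add the QP
 * to the flush list so its outstanding RQEs complete with FLUSHED_ERR.
 */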
2863 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2864 struct cq_terminal *hwcqe,
2865 struct bnxt_qplib_cqe **pcqe,
2866 int *budget)
2867 {
2868 struct bnxt_qplib_qp *qp;
2869 struct bnxt_qplib_q *sq, *rq;
2870 struct bnxt_qplib_cqe *cqe;
2871 u32 swq_last = 0, cqe_cons;
2872 int rc = 0;
2873
2874 /* Check the Status */
2875 if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2876 dev_warn(&cq->hwq.pdev->dev,
2877 "FP: CQ Process Terminal Error status = 0x%x\n",
2878 hwcqe->status);
2879
2880 qp = (struct bnxt_qplib_qp *)((unsigned long)
2881 le64_to_cpu(hwcqe->qp_handle));
2882 if (!qp)
2883 return -EINVAL;
2884
2885 /* Must block new posting of SQ and RQ */
2886 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2887
2888 sq = &qp->sq;
2889 rq = &qp->rq;
2890
2891 cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2892 if (cqe_cons == 0xFFFF)
2893 goto do_rq;
2894 cqe_cons %= sq->max_sw_wqe;
2895
2896 if (qp->sq.flushed) {
2897 dev_dbg(&cq->hwq.pdev->dev,
2898 "%s: QP in Flush QP = %p\n", __func__, qp);
2899 goto sq_done;
2900 }
2901
2902 /* A terminal CQE can also aggregate prior successful CQEs.
2903 * So we must complete all CQEs from the current sq's cons to the
2904 * cq_cons with status OK
2905 */
2906 cqe = *pcqe;
2907 while (*budget) {
2908 swq_last = sq->swq_last;
2909 if (swq_last == cqe_cons)
2910 break;
2911 if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2912 memset(cqe, 0, sizeof(*cqe));
2913 cqe->status = CQ_REQ_STATUS_OK;
2914 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2915 cqe->qp_handle = (u64)(unsigned long)qp;
2916 cqe->src_qp = qp->id;
2917 cqe->wr_id = sq->swq[swq_last].wr_id;
2918 cqe->type = sq->swq[swq_last].type;
2919 cqe++;
2920 (*budget)--;
2921 }
2922 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2923 sq->swq[swq_last].slots, &sq->dbinfo.flags);
2924 sq->swq_last = sq->swq[swq_last].next_idx;
2925 }
2926 *pcqe = cqe;
2927 if (!(*budget) && swq_last != cqe_cons) {
2928 /* Out of budget */
2929 rc = -EAGAIN;
2930 goto sq_done;
2931 }
2932 sq_done:
2933 if (rc)
2934 return rc;
2935 do_rq:
2936 cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2937 if (cqe_cons == 0xFFFF) {
2938 goto done;
2939 } else if (cqe_cons > rq->max_wqe - 1) {
2940 dev_err(&cq->hwq.pdev->dev,
2941 "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2942 cqe_cons, rq->max_wqe);
2943 rc = -EINVAL;
2944 goto done;
2945 }
2946
2947 if (qp->rq.flushed) {
2948 dev_dbg(&cq->hwq.pdev->dev,
2949 "%s: QP in Flush QP = %p\n", __func__, qp);
2950 rc = 0;
2951 goto done;
2952 }
2953
2954 /* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2955 * from the current rq->cons to the rq->prod, regardless of the
2956 * rq->cons value the terminal CQE indicates
2957 */
2958
2959 /* Add qp to flush list of the CQ */
2960 bnxt_qplib_add_flush_qp(qp);
2961 done:
2962 return rc;
2963 }
2964
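/*
 * A cutoff CQE signals that a CQ resize has completed: clear the
 * resize-in-progress flag and wake anyone waiting on the CQ.
 */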
2965 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2966 struct cq_cutoff *hwcqe)
2967 {
2968 /* Check the Status */
2969 if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2970 dev_err(&cq->hwq.pdev->dev,
2971 "FP: CQ Process Cutoff Error status = 0x%x\n",
2972 hwcqe->status);
2973 return -EINVAL;
2974 }
2975 clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2976 wake_up_interruptible(&cq->waitq);
2977
2978 return 0;
2979 }
2980
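/*
 * Drain this CQ's flush lists under flush_lock, generating flush
 * completions for every QP queued on them. Returns the number of
 * CQEs filled into the caller's array.
 */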
2981 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2982 struct bnxt_qplib_cqe *cqe,
2983 int num_cqes)
2984 {
2985 struct bnxt_qplib_qp *qp = NULL;
2986 u32 budget = num_cqes;
2987 unsigned long flags;
2988
2989 spin_lock_irqsave(&cq->flush_lock, flags);
2990 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2991 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2992 __flush_sq(&qp->sq, qp, &cqe, &budget);
2993 }
2994
2995 list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2996 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2997 __flush_rq(&qp->rq, qp, &cqe, &budget);
2998 }
2999 spin_unlock_irqrestore(&cq->flush_lock, flags);
3000
3001 return num_cqes - budget;
3002 }
3003
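/*
 * Main poll loop: consume valid CQEs from the hardware queue, up to
 * num_cqes, dispatching on the CQE type. The CQ doorbell is rung once
 * at the end for everything polled. Returns the number of qplib CQEs
 * written; *lib_qp is set when WA 9060 requires the caller's attention
 * for that QP.
 */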
3004 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
3005 int num_cqes, struct bnxt_qplib_qp **lib_qp)
3006 {
3007 struct cq_base *hw_cqe;
3008 int budget, rc = 0;
3009 u32 hw_polled = 0;
3010 u8 type;
3011
3012 budget = num_cqes;
3013
3014 while (budget) {
3015 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
3016
3017 /* Check for Valid bit */
3018 if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
3019 break;
3020
3021 /*
3022 * The valid test of the entry must be done before
3023 * reading any further.
3024 */
3025 dma_rmb();
3026 /* Translate from the device's CQE format to qplib_wc */
3027 type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
3028 switch (type) {
3029 case CQ_BASE_CQE_TYPE_REQ:
3030 rc = bnxt_qplib_cq_process_req(cq,
3031 (struct cq_req *)hw_cqe,
3032 &cqe, &budget,
3033 cq->hwq.cons, lib_qp);
3034 break;
3035 case CQ_BASE_CQE_TYPE_RES_RC:
3036 rc = bnxt_qplib_cq_process_res_rc(cq,
3037 (struct cq_res_rc *)
3038 hw_cqe, &cqe,
3039 &budget);
3040 break;
3041 case CQ_BASE_CQE_TYPE_RES_UD:
3042 rc = bnxt_qplib_cq_process_res_ud
3043 (cq, (struct cq_res_ud *)hw_cqe, &cqe,
3044 &budget);
3045 break;
3046 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3047 rc = bnxt_qplib_cq_process_res_raweth_qp1
3048 (cq, (struct cq_res_raweth_qp1 *)
3049 hw_cqe, &cqe, &budget);
3050 break;
3051 case CQ_BASE_CQE_TYPE_TERMINAL:
3052 rc = bnxt_qplib_cq_process_terminal
3053 (cq, (struct cq_terminal *)hw_cqe,
3054 &cqe, &budget);
3055 break;
3056 case CQ_BASE_CQE_TYPE_CUT_OFF:
3057 bnxt_qplib_cq_process_cutoff
3058 (cq, (struct cq_cutoff *)hw_cqe);
3059 /* Done processing this CQ */
3060 goto exit;
3061 default:
3062 dev_err(&cq->hwq.pdev->dev,
3063 "process_cq unknown type 0x%lx\n",
3064 hw_cqe->cqe_type_toggle &
3065 CQ_BASE_CQE_TYPE_MASK);
3066 rc = -EINVAL;
3067 break;
3068 }
3069 if (rc < 0) {
3070 if (rc == -EAGAIN)
3071 break;
3072 /* Error while processing the CQE, just skip to the
3073 * next one
3074 */
3075 if (type != CQ_BASE_CQE_TYPE_TERMINAL)
3076 dev_err(&cq->hwq.pdev->dev,
3077 "process_cqe error rc = 0x%x\n", rc);
3078 }
3079 hw_polled++;
3080 bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
3081 1, &cq->dbinfo.flags);
3082
3083 }
3084 if (hw_polled)
3085 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
3086 exit:
3087 return num_cqes - budget;
3088 }
3089
3090 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
3091 {
3092 if (arm_type)
3093 bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
3094 /* Use cq->arm_state to track whether the cq handler should be invoked */
3095 atomic_set(&cq->arm_state, 1);
3096 }
3097
3098 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3099 {
3100 flush_workqueue(qp->scq->nq->cqn_wq);
3101 if (qp->scq != qp->rcq)
3102 flush_workqueue(qp->rcq->nq->cqn_wq);
3103 }
3104