1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: IB Verbs interpreter
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
44 
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/ib_addr.h>
49 #include <rdma/ib_mad.h>
50 #include <rdma/ib_cache.h>
51 #include <rdma/uverbs_ioctl.h>
52 
53 #include "bnxt_ulp.h"
54 
55 #include "roce_hsi.h"
56 #include "qplib_res.h"
57 #include "qplib_sp.h"
58 #include "qplib_fp.h"
59 #include "qplib_rcfw.h"
60 
61 #include "bnxt_re.h"
62 #include "ib_verbs.h"
63 #include <rdma/bnxt_re-abi.h>
64 
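/* Helpers to translate memory access flags between the IB verbs
 * representation and the bnxt_qplib flags passed to the firmware.
 */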
65 static int __from_ib_access_flags(int iflags)
66 {
67 	int qflags = 0;
68 
69 	if (iflags & IB_ACCESS_LOCAL_WRITE)
70 		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
71 	if (iflags & IB_ACCESS_REMOTE_READ)
72 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
73 	if (iflags & IB_ACCESS_REMOTE_WRITE)
74 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
75 	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
76 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
77 	if (iflags & IB_ACCESS_MW_BIND)
78 		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
79 	if (iflags & IB_ZERO_BASED)
80 		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
81 	if (iflags & IB_ACCESS_ON_DEMAND)
82 		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
83 	return qflags;
84 }
85 
86 static enum ib_access_flags __to_ib_access_flags(int qflags)
87 {
88 	enum ib_access_flags iflags = 0;
89 
90 	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
91 		iflags |= IB_ACCESS_LOCAL_WRITE;
92 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
93 		iflags |= IB_ACCESS_REMOTE_WRITE;
94 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
95 		iflags |= IB_ACCESS_REMOTE_READ;
96 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
97 		iflags |= IB_ACCESS_REMOTE_ATOMIC;
98 	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
99 		iflags |= IB_ACCESS_MW_BIND;
100 	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
101 		iflags |= IB_ZERO_BASED;
102 	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
103 		iflags |= IB_ACCESS_ON_DEMAND;
104 	return iflags;
105 }
106 
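/* Transcribe an ib_sge array into the qplib SGE format used when posting
 * WQEs; returns the total length of the scatter/gather list in bytes.
 */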
107 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
108 			     struct bnxt_qplib_sge *sg_list, int num)
109 {
110 	int i, total = 0;
111 
112 	for (i = 0; i < num; i++) {
113 		sg_list[i].addr = ib_sg_list[i].addr;
114 		sg_list[i].lkey = ib_sg_list[i].lkey;
115 		sg_list[i].size = ib_sg_list[i].length;
116 		total += sg_list[i].size;
117 	}
118 	return total;
119 }
120 
121 /* Device */
122 int bnxt_re_query_device(struct ib_device *ibdev,
123 			 struct ib_device_attr *ib_attr,
124 			 struct ib_udata *udata)
125 {
126 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
127 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
128 
129 	memset(ib_attr, 0, sizeof(*ib_attr));
130 	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
131 	       min(sizeof(dev_attr->fw_ver),
132 		   sizeof(ib_attr->fw_ver)));
133 	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
134 			    (u8 *)&ib_attr->sys_image_guid);
135 	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
136 	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
137 
138 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
139 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
140 	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
141 	ib_attr->max_qp = dev_attr->max_qp;
142 	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
143 	ib_attr->device_cap_flags =
144 				    IB_DEVICE_CURR_QP_STATE_MOD
145 				    | IB_DEVICE_RC_RNR_NAK_GEN
146 				    | IB_DEVICE_SHUTDOWN_PORT
147 				    | IB_DEVICE_SYS_IMAGE_GUID
148 				    | IB_DEVICE_LOCAL_DMA_LKEY
149 				    | IB_DEVICE_RESIZE_MAX_WR
150 				    | IB_DEVICE_PORT_ACTIVE_EVENT
151 				    | IB_DEVICE_N_NOTIFY_CQ
152 				    | IB_DEVICE_MEM_WINDOW
153 				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
154 				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
155 	ib_attr->max_send_sge = dev_attr->max_qp_sges;
156 	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
157 	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
158 	ib_attr->max_cq = dev_attr->max_cq;
159 	ib_attr->max_cqe = dev_attr->max_cq_wqes;
160 	ib_attr->max_mr = dev_attr->max_mr;
161 	ib_attr->max_pd = dev_attr->max_pd;
162 	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
163 	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
164 	ib_attr->atomic_cap = IB_ATOMIC_NONE;
165 	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
166 
167 	ib_attr->max_ee_rd_atom = 0;
168 	ib_attr->max_res_rd_atom = 0;
169 	ib_attr->max_ee_init_rd_atom = 0;
170 	ib_attr->max_ee = 0;
171 	ib_attr->max_rdd = 0;
172 	ib_attr->max_mw = dev_attr->max_mw;
173 	ib_attr->max_raw_ipv6_qp = 0;
174 	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
175 	ib_attr->max_mcast_grp = 0;
176 	ib_attr->max_mcast_qp_attach = 0;
177 	ib_attr->max_total_mcast_qp_attach = 0;
178 	ib_attr->max_ah = dev_attr->max_ah;
179 
180 	ib_attr->max_srq = dev_attr->max_srq;
181 	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
182 	ib_attr->max_srq_sge = dev_attr->max_srq_sges;
183 
184 	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
185 
186 	ib_attr->max_pkeys = 1;
187 	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
188 	return 0;
189 }
190 
191 /* Port */
192 int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
193 		       struct ib_port_attr *port_attr)
194 {
195 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
196 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
197 
198 	memset(port_attr, 0, sizeof(*port_attr));
199 
200 	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
201 		port_attr->state = IB_PORT_ACTIVE;
202 		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
203 	} else {
204 		port_attr->state = IB_PORT_DOWN;
205 		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
206 	}
207 	port_attr->max_mtu = IB_MTU_4096;
208 	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
209 	port_attr->gid_tbl_len = dev_attr->max_sgid;
210 	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
211 				    IB_PORT_DEVICE_MGMT_SUP |
212 				    IB_PORT_VENDOR_CLASS_SUP;
213 	port_attr->ip_gids = true;
214 
215 	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
216 	port_attr->bad_pkey_cntr = 0;
217 	port_attr->qkey_viol_cntr = 0;
218 	port_attr->pkey_tbl_len = dev_attr->max_pkey;
219 	port_attr->lid = 0;
220 	port_attr->sm_lid = 0;
221 	port_attr->lmc = 0;
222 	port_attr->max_vl_num = 4;
223 	port_attr->sm_sl = 0;
224 	port_attr->subnet_timeout = 0;
225 	port_attr->init_type_reply = 0;
226 	port_attr->active_speed = rdev->active_speed;
227 	port_attr->active_width = rdev->active_width;
228 
229 	return 0;
230 }
231 
232 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
233 			       struct ib_port_immutable *immutable)
234 {
235 	struct ib_port_attr port_attr;
236 
237 	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
238 		return -EINVAL;
239 
240 	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
241 	immutable->gid_tbl_len = port_attr.gid_tbl_len;
242 	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
243 	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
244 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
245 	return 0;
246 }
247 
248 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
249 {
250 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
251 
252 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
253 		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
254 		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
255 }
256 
257 int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
258 		       u16 index, u16 *pkey)
259 {
260 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
261 
262 	/* Ignore port_num */
263 
264 	memset(pkey, 0, sizeof(*pkey));
265 	return bnxt_qplib_get_pkey(&rdev->qplib_res,
266 				   &rdev->qplib_res.pkey_tbl, index, pkey);
267 }
268 
269 int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
270 		      int index, union ib_gid *gid)
271 {
272 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
273 	int rc = 0;
274 
275 	/* Ignore port_num */
276 	memset(gid, 0, sizeof(*gid));
277 	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
278 				 &rdev->qplib_res.sgid_tbl, index,
279 				 (struct bnxt_qplib_gid *)gid);
280 	return rc;
281 }
282 
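/* GID entries are reference counted through bnxt_re_gid_ctx objects kept
 * in sgid_tbl->ctx: bnxt_re_add_gid() either creates a context or bumps
 * the refcount of an existing one, and bnxt_re_del_gid() removes the
 * entry from hardware only once that refcount drops to zero.
 */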
283 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
284 {
285 	int rc = 0;
286 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
287 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
288 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
289 	struct bnxt_qplib_gid *gid_to_del;
290 	u16 vlan_id = 0xFFFF;
291 
292 	/* Delete the entry from the hardware */
293 	ctx = *context;
294 	if (!ctx)
295 		return -EINVAL;
296 
297 	if (sgid_tbl && sgid_tbl->active) {
298 		if (ctx->idx >= sgid_tbl->max)
299 			return -EINVAL;
300 		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
301 		vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
302 		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
303 		 * or via the ib_unregister_device path. In the former case QP1
304 		 * may not be destroyed yet, in which case just return, as FW
305 		 * needs that entry to be present and will fail its deletion.
306 		 * We could get invoked again after QP1 is destroyed OR get an
307 		 * ADD_GID call with a different GID value for the same index
308 		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
309 		 */
310 		if (ctx->idx == 0 &&
311 		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
312 		    ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
313 			ibdev_dbg(&rdev->ibdev,
314 				  "Trying to delete GID0 while QP1 is alive\n");
315 			return -EFAULT;
316 		}
317 		ctx->refcnt--;
318 		if (!ctx->refcnt) {
319 			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
320 						 vlan_id,  true);
321 			if (rc) {
322 				ibdev_err(&rdev->ibdev,
323 					  "Failed to remove GID: %#x", rc);
324 			} else {
325 				ctx_tbl = sgid_tbl->ctx;
326 				ctx_tbl[ctx->idx] = NULL;
327 				kfree(ctx);
328 			}
329 		}
330 	} else {
331 		return -EINVAL;
332 	}
333 	return rc;
334 }
335 
336 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
337 {
338 	int rc;
339 	u32 tbl_idx = 0;
340 	u16 vlan_id = 0xFFFF;
341 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
342 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
343 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
344 
345 	rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
346 	if (rc)
347 		return rc;
348 
349 	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
350 				 rdev->qplib_res.netdev->dev_addr,
351 				 vlan_id, true, &tbl_idx);
352 	if (rc == -EALREADY) {
353 		ctx_tbl = sgid_tbl->ctx;
354 		ctx_tbl[tbl_idx]->refcnt++;
355 		*context = ctx_tbl[tbl_idx];
356 		return 0;
357 	}
358 
359 	if (rc < 0) {
360 		ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
361 		return rc;
362 	}
363 
364 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
365 	if (!ctx)
366 		return -ENOMEM;
367 	ctx_tbl = sgid_tbl->ctx;
368 	ctx->idx = tbl_idx;
369 	ctx->refcnt = 1;
370 	ctx_tbl[tbl_idx] = ctx;
371 	*context = ctx;
372 
373 	return rc;
374 }
375 
376 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
377 					    u8 port_num)
378 {
379 	return IB_LINK_LAYER_ETHERNET;
380 }
381 
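/* Fence MR/MW support for kernel PDs: a small DMA buffer is registered as
 * an MR and a type-1 MW is created over it. A bind WQE carrying the
 * UC_FENCE flag (see bnxt_re_bind_fence_mw()) can then be posted ahead of
 * a fenced work request to provide the fence semantics in hardware.
 */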
382 #define	BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
383 
384 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
385 {
386 	struct bnxt_re_fence_data *fence = &pd->fence;
387 	struct ib_mr *ib_mr = &fence->mr->ib_mr;
388 	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
389 
390 	memset(wqe, 0, sizeof(*wqe));
391 	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
392 	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
393 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
394 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
395 	wqe->bind.zero_based = false;
396 	wqe->bind.parent_l_key = ib_mr->lkey;
397 	wqe->bind.va = (u64)(unsigned long)fence->va;
398 	wqe->bind.length = fence->size;
399 	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
400 	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
401 
402 	/* Save the initial rkey in fence structure for now;
403 	 * wqe->bind.r_key will be set at (re)bind time.
404 	 */
405 	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
406 }
407 
408 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
409 {
410 	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
411 					     qplib_qp);
412 	struct ib_pd *ib_pd = qp->ib_qp.pd;
413 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
414 	struct bnxt_re_fence_data *fence = &pd->fence;
415 	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
416 	struct bnxt_qplib_swqe wqe;
417 	int rc;
418 
419 	memcpy(&wqe, fence_wqe, sizeof(wqe));
420 	wqe.bind.r_key = fence->bind_rkey;
421 	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
422 
423 	ibdev_dbg(&qp->rdev->ibdev,
424 		  "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
425 		wqe.bind.r_key, qp->qplib_qp.id, pd);
426 	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
427 	if (rc) {
428 		ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
429 		return rc;
430 	}
431 	bnxt_qplib_post_send_db(&qp->qplib_qp);
432 
433 	return rc;
434 }
435 
436 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
437 {
438 	struct bnxt_re_fence_data *fence = &pd->fence;
439 	struct bnxt_re_dev *rdev = pd->rdev;
440 	struct device *dev = &rdev->en_dev->pdev->dev;
441 	struct bnxt_re_mr *mr = fence->mr;
442 
443 	if (fence->mw) {
444 		bnxt_re_dealloc_mw(fence->mw);
445 		fence->mw = NULL;
446 	}
447 	if (mr) {
448 		if (mr->ib_mr.rkey)
449 			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
450 					     true);
451 		if (mr->ib_mr.lkey)
452 			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
453 		kfree(mr);
454 		fence->mr = NULL;
455 	}
456 	if (fence->dma_addr) {
457 		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
458 				 DMA_BIDIRECTIONAL);
459 		fence->dma_addr = 0;
460 	}
461 }
462 
463 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
464 {
465 	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
466 	struct bnxt_re_fence_data *fence = &pd->fence;
467 	struct bnxt_re_dev *rdev = pd->rdev;
468 	struct device *dev = &rdev->en_dev->pdev->dev;
469 	struct bnxt_re_mr *mr = NULL;
470 	dma_addr_t dma_addr = 0;
471 	struct ib_mw *mw;
472 	u64 pbl_tbl;
473 	int rc;
474 
475 	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
476 				  DMA_BIDIRECTIONAL);
477 	rc = dma_mapping_error(dev, dma_addr);
478 	if (rc) {
479 		ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
480 		rc = -EIO;
481 		fence->dma_addr = 0;
482 		goto fail;
483 	}
484 	fence->dma_addr = dma_addr;
485 
486 	/* Allocate an MR */
487 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
488 	if (!mr) {
489 		rc = -ENOMEM;
490 		goto fail;
491 	}
492 	fence->mr = mr;
493 	mr->rdev = rdev;
494 	mr->qplib_mr.pd = &pd->qplib_pd;
495 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
496 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
497 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
498 	if (rc) {
499 		ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
500 		goto fail;
501 	}
502 
503 	/* Register MR */
504 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
505 	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
506 	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
507 	pbl_tbl = dma_addr;
508 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
509 			       BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
510 	if (rc) {
511 		ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
512 		goto fail;
513 	}
514 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
515 
516 	/* Create a fence MW only for kernel consumers */
517 	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
518 	if (IS_ERR(mw)) {
519 		ibdev_err(&rdev->ibdev,
520 			  "Failed to create fence-MW for PD: %p\n", pd);
521 		rc = PTR_ERR(mw);
522 		goto fail;
523 	}
524 	fence->mw = mw;
525 
526 	bnxt_re_create_fence_wqe(pd);
527 	return 0;
528 
529 fail:
530 	bnxt_re_destroy_fence_mr(pd);
531 	return rc;
532 }
533 
534 /* Protection Domains */
535 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
536 {
537 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
538 	struct bnxt_re_dev *rdev = pd->rdev;
539 
540 	bnxt_re_destroy_fence_mr(pd);
541 
542 	if (pd->qplib_pd.id)
543 		bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
544 				      &pd->qplib_pd);
545 	return 0;
546 }
547 
548 int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
549 {
550 	struct ib_device *ibdev = ibpd->device;
551 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
552 	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
553 		udata, struct bnxt_re_ucontext, ib_uctx);
554 	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
555 	int rc;
556 
557 	pd->rdev = rdev;
558 	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
559 		ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
560 		rc = -ENOMEM;
561 		goto fail;
562 	}
563 
564 	if (udata) {
565 		struct bnxt_re_pd_resp resp;
566 
567 		if (!ucntx->dpi.dbr) {
568 			/* Allocate the DPI in alloc_pd to avoid failures in
569 			 * ibv_devinfo and related applications when DPIs
570 			 * are depleted.
571 			 */
572 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
573 						 &ucntx->dpi, ucntx)) {
574 				rc = -ENOMEM;
575 				goto dbfail;
576 			}
577 		}
578 
579 		resp.pdid = pd->qplib_pd.id;
580 		/* Still allow mapping this DBR to the new user PD. */
581 		resp.dpi = ucntx->dpi.dpi;
582 		resp.dbr = (u64)ucntx->dpi.umdbr;
583 
584 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
585 		if (rc) {
586 			ibdev_err(&rdev->ibdev,
587 				  "Failed to copy user response\n");
588 			goto dbfail;
589 		}
590 	}
591 
592 	if (!udata)
593 		if (bnxt_re_create_fence_mr(pd))
594 			ibdev_warn(&rdev->ibdev,
595 				   "Failed to create Fence-MR\n");
596 	return 0;
597 dbfail:
598 	bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
599 			      &pd->qplib_pd);
600 fail:
601 	return rc;
602 }
603 
604 /* Address Handles */
605 int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
606 {
607 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
608 	struct bnxt_re_dev *rdev = ah->rdev;
609 
610 	bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
611 			      !(flags & RDMA_DESTROY_AH_SLEEPABLE));
612 	return 0;
613 }
614 
615 static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
616 {
617 	u8 nw_type;
618 
619 	switch (ntype) {
620 	case RDMA_NETWORK_IPV4:
621 		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
622 		break;
623 	case RDMA_NETWORK_IPV6:
624 		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
625 		break;
626 	default:
627 		nw_type = CMDQ_CREATE_AH_TYPE_V1;
628 		break;
629 	}
630 	return nw_type;
631 }
632 
633 int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
634 		      struct ib_udata *udata)
635 {
636 	struct ib_pd *ib_pd = ib_ah->pd;
637 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
638 	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
639 	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
640 	struct bnxt_re_dev *rdev = pd->rdev;
641 	const struct ib_gid_attr *sgid_attr;
642 	struct bnxt_re_gid_ctx *ctx;
643 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
644 	u8 nw_type;
645 	int rc;
646 
647 	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
648 		ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
649 		return -EINVAL;
650 	}
651 
652 	ah->rdev = rdev;
653 	ah->qplib_ah.pd = &pd->qplib_pd;
654 
655 	/* Supply the configuration for the HW */
656 	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
657 	       sizeof(union ib_gid));
658 	sgid_attr = grh->sgid_attr;
659 	/* Get the HW context of the GID. The reference
660 	 * to the GID table entry is already taken by the caller.
661 	 */
662 	ctx = rdma_read_gid_hw_context(sgid_attr);
663 	ah->qplib_ah.sgid_index = ctx->idx;
664 	ah->qplib_ah.host_sgid_index = grh->sgid_index;
665 	ah->qplib_ah.traffic_class = grh->traffic_class;
666 	ah->qplib_ah.flow_label = grh->flow_label;
667 	ah->qplib_ah.hop_limit = grh->hop_limit;
668 	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
669 
670 	/* Get network header type for this GID */
671 	nw_type = rdma_gid_attr_network_type(sgid_attr);
672 	ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
673 
674 	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
675 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
676 				  !(init_attr->flags &
677 				    RDMA_CREATE_AH_SLEEPABLE));
678 	if (rc) {
679 		ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
680 		return rc;
681 	}
682 
683 	/* Write the AH id (AVID) to the shared page for userspace to use. */
684 	if (udata) {
685 		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
686 			udata, struct bnxt_re_ucontext, ib_uctx);
687 		unsigned long flag;
688 		u32 *wrptr;
689 
690 		spin_lock_irqsave(&uctx->sh_lock, flag);
691 		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
692 		*wrptr = ah->qplib_ah.id;
693 		wmb(); /* make sure cache is updated. */
694 		spin_unlock_irqrestore(&uctx->sh_lock, flag);
695 	}
696 
697 	return 0;
698 }
699 
700 int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
701 {
702 	return 0;
703 }
704 
705 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
706 {
707 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
708 
709 	ah_attr->type = ib_ah->type;
710 	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
711 	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
712 	rdma_ah_set_grh(ah_attr, NULL, 0,
713 			ah->qplib_ah.host_sgid_index,
714 			0, ah->qplib_ah.traffic_class);
715 	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
716 	rdma_ah_set_port_num(ah_attr, 1);
717 	rdma_ah_set_static_rate(ah_attr, 0);
718 	return 0;
719 }
720 
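/* Lock both CQs attached to a QP (send CQ first, then the receive CQ when
 * they differ) so the QP can be cleaned or flushed safely;
 * bnxt_re_unlock_cqs() releases them in the reverse order.
 */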
721 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
722 	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
723 {
724 	unsigned long flags;
725 
726 	spin_lock_irqsave(&qp->scq->cq_lock, flags);
727 	if (qp->rcq != qp->scq)
728 		spin_lock(&qp->rcq->cq_lock);
729 	else
730 		__acquire(&qp->rcq->cq_lock);
731 
732 	return flags;
733 }
734 
735 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
736 			unsigned long flags)
737 	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
738 {
739 	if (qp->rcq != qp->scq)
740 		spin_unlock(&qp->rcq->cq_lock);
741 	else
742 		__release(&qp->rcq->cq_lock);
743 	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
744 }
745 
746 static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
747 {
748 	struct bnxt_re_qp *gsi_sqp;
749 	struct bnxt_re_ah *gsi_sah;
750 	struct bnxt_re_dev *rdev;
751 	int rc = 0;
752 
753 	rdev = qp->rdev;
754 	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
755 	gsi_sah = rdev->gsi_ctx.gsi_sah;
756 
757 	ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
758 	bnxt_qplib_destroy_ah(&rdev->qplib_res,
759 			      &gsi_sah->qplib_ah,
760 			      true);
761 	bnxt_qplib_clean_qp(&qp->qplib_qp);
762 
763 	ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
764 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
765 	if (rc) {
766 		ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
767 		goto fail;
768 	}
769 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
770 
771 	/* remove from active qp list */
772 	mutex_lock(&rdev->qp_lock);
773 	list_del(&gsi_sqp->list);
774 	mutex_unlock(&rdev->qp_lock);
775 	atomic_dec(&rdev->qp_count);
776 
777 	kfree(rdev->gsi_ctx.sqp_tbl);
778 	kfree(gsi_sah);
779 	kfree(gsi_sqp);
780 	rdev->gsi_ctx.gsi_sqp = NULL;
781 	rdev->gsi_ctx.gsi_sah = NULL;
782 	rdev->gsi_ctx.sqp_tbl = NULL;
783 
784 	return 0;
785 fail:
786 	return rc;
787 }
788 
789 /* Queue Pairs */
790 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
791 {
792 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
793 	struct bnxt_re_dev *rdev = qp->rdev;
794 	unsigned int flags;
795 	int rc;
796 
797 	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
798 
799 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
800 	if (rc) {
801 		ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
802 		return rc;
803 	}
804 
805 	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
806 		flags = bnxt_re_lock_cqs(qp);
807 		bnxt_qplib_clean_qp(&qp->qplib_qp);
808 		bnxt_re_unlock_cqs(qp, flags);
809 	}
810 
811 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
812 
813 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
814 		rc = bnxt_re_destroy_gsi_sqp(qp);
815 		if (rc)
816 			goto sh_fail;
817 	}
818 
819 	mutex_lock(&rdev->qp_lock);
820 	list_del(&qp->list);
821 	mutex_unlock(&rdev->qp_lock);
822 	atomic_dec(&rdev->qp_count);
823 
824 	ib_umem_release(qp->rumem);
825 	ib_umem_release(qp->sumem);
826 
827 	kfree(qp);
828 	return 0;
829 sh_fail:
830 	return rc;
831 }
832 
833 static u8 __from_ib_qp_type(enum ib_qp_type type)
834 {
835 	switch (type) {
836 	case IB_QPT_GSI:
837 		return CMDQ_CREATE_QP1_TYPE_GSI;
838 	case IB_QPT_RC:
839 		return CMDQ_CREATE_QP_TYPE_RC;
840 	case IB_QPT_UD:
841 		return CMDQ_CREATE_QP_TYPE_UD;
842 	default:
843 		return IB_QPT_MAX;
844 	}
845 }
846 
847 static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
848 				   int rsge, int max)
849 {
850 	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
851 		rsge = max;
852 	return bnxt_re_get_rwqe_size(rsge);
853 }
854 
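/* Compute the send WQE size: start from the size needed for nsge SGEs
 * and, when inline data is requested, grow it to fit the send header plus
 * the inline payload, aligned to the send header (slot) size.
 */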
855 static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
856 {
857 	u16 wqe_size, calc_ils;
858 
859 	wqe_size = bnxt_re_get_swqe_size(nsge);
860 	if (ilsize) {
861 		calc_ils = sizeof(struct sq_send_hdr) + ilsize;
862 		wqe_size = max_t(u16, calc_ils, wqe_size);
863 		wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
864 	}
865 	return wqe_size;
866 }
867 
868 static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
869 				   struct ib_qp_init_attr *init_attr)
870 {
871 	struct bnxt_qplib_dev_attr *dev_attr;
872 	struct bnxt_qplib_qp *qplqp;
873 	struct bnxt_re_dev *rdev;
874 	struct bnxt_qplib_q *sq;
875 	int align, ilsize;
876 
877 	rdev = qp->rdev;
878 	qplqp = &qp->qplib_qp;
879 	sq = &qplqp->sq;
880 	dev_attr = &rdev->dev_attr;
881 
882 	align = sizeof(struct sq_send_hdr);
883 	ilsize = ALIGN(init_attr->cap.max_inline_data, align);
884 
885 	sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
886 	if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
887 		return -EINVAL;
888 	/* For Gen P4 and Gen P5 devices in backward-compatibility (static
889 	 * WQE) mode, the WQE size is fixed at 128 bytes.
890 	 */
891 	if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
892 			qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
893 		sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);
894 
895 	if (init_attr->cap.max_inline_data) {
896 		qplqp->max_inline_data = sq->wqe_size -
897 			sizeof(struct sq_send_hdr);
898 		init_attr->cap.max_inline_data = qplqp->max_inline_data;
899 		if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
900 			sq->max_sge = qplqp->max_inline_data /
901 				sizeof(struct sq_sge);
902 	}
903 
904 	return 0;
905 }
906 
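/* For user QPs, pin the userspace SQ buffer (and the RQ buffer when no
 * SRQ is used) via ib_umem and point the QP at the user context's
 * doorbell page (DPI). For RC QPs, PSN search memory is carved out of the
 * same SQ mapping.
 */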
907 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
908 				struct bnxt_re_qp *qp, struct ib_udata *udata)
909 {
910 	struct bnxt_qplib_qp *qplib_qp;
911 	struct bnxt_re_ucontext *cntx;
912 	struct bnxt_re_qp_req ureq;
913 	int bytes = 0, psn_sz;
914 	struct ib_umem *umem;
915 	int psn_nume;
916 
917 	qplib_qp = &qp->qplib_qp;
918 	cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
919 					 ib_uctx);
920 	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
921 		return -EFAULT;
922 
923 	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
924 	/* Consider mapping PSN search memory only for RC QPs. */
925 	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
926 		psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
927 						   sizeof(struct sq_psn_search_ext) :
928 						   sizeof(struct sq_psn_search);
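		/* One PSN search entry per WQE in static WQE mode; in variable
		 * WQE mode, size it by the number of SGE-sized slots the SQ
		 * can hold.
		 */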
929 		psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
930 			    qplib_qp->sq.max_wqe :
931 			    ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
932 			      sizeof(struct bnxt_qplib_sge));
933 		bytes += (psn_nume * psn_sz);
934 	}
935 
936 	bytes = PAGE_ALIGN(bytes);
937 	umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
938 			   IB_ACCESS_LOCAL_WRITE);
939 	if (IS_ERR(umem))
940 		return PTR_ERR(umem);
941 
942 	qp->sumem = umem;
943 	qplib_qp->sq.sg_info.umem = umem;
944 	qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
945 	qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
946 	qplib_qp->qp_handle = ureq.qp_handle;
947 
948 	if (!qp->qplib_qp.srq) {
949 		bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
950 		bytes = PAGE_ALIGN(bytes);
951 		umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
952 				   IB_ACCESS_LOCAL_WRITE);
953 		if (IS_ERR(umem))
954 			goto rqfail;
955 		qp->rumem = umem;
956 		qplib_qp->rq.sg_info.umem = umem;
957 		qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
958 		qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
959 	}
960 
961 	qplib_qp->dpi = &cntx->dpi;
962 	return 0;
963 rqfail:
964 	ib_umem_release(qp->sumem);
965 	qp->sumem = NULL;
966 	memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
967 
968 	return PTR_ERR(umem);
969 }
970 
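/* On devices that are not gen P5, QP1 (GSI) traffic is relayed through an
 * internal "shadow" UD QP owned by the driver. The helpers below create
 * the AH and the QP used for that relay.
 */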
971 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
972 				(struct bnxt_re_pd *pd,
973 				 struct bnxt_qplib_res *qp1_res,
974 				 struct bnxt_qplib_qp *qp1_qp)
975 {
976 	struct bnxt_re_dev *rdev = pd->rdev;
977 	struct bnxt_re_ah *ah;
978 	union ib_gid sgid;
979 	int rc;
980 
981 	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
982 	if (!ah)
983 		return NULL;
984 
985 	ah->rdev = rdev;
986 	ah->qplib_ah.pd = &pd->qplib_pd;
987 
988 	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
989 	if (rc)
990 		goto fail;
991 
992 	/* Supply the DGID with the same data as the SGID */
993 	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
994 	       sizeof(union ib_gid));
995 	ah->qplib_ah.sgid_index = 0;
996 
997 	ah->qplib_ah.traffic_class = 0;
998 	ah->qplib_ah.flow_label = 0;
999 	ah->qplib_ah.hop_limit = 1;
1000 	ah->qplib_ah.sl = 0;
1001 	/* Have DMAC same as SMAC */
1002 	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
1003 
1004 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
1005 	if (rc) {
1006 		ibdev_err(&rdev->ibdev,
1007 			  "Failed to allocate HW AH for Shadow QP");
1008 		goto fail;
1009 	}
1010 
1011 	return ah;
1012 
1013 fail:
1014 	kfree(ah);
1015 	return NULL;
1016 }
1017 
1018 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
1019 				(struct bnxt_re_pd *pd,
1020 				 struct bnxt_qplib_res *qp1_res,
1021 				 struct bnxt_qplib_qp *qp1_qp)
1022 {
1023 	struct bnxt_re_dev *rdev = pd->rdev;
1024 	struct bnxt_re_qp *qp;
1025 	int rc;
1026 
1027 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1028 	if (!qp)
1029 		return NULL;
1030 
1031 	qp->rdev = rdev;
1032 
1033 	/* Initialize the shadow QP structure from the QP1 values */
1034 	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1035 
1036 	qp->qplib_qp.pd = &pd->qplib_pd;
1037 	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1038 	qp->qplib_qp.type = IB_QPT_UD;
1039 
1040 	qp->qplib_qp.max_inline_data = 0;
1041 	qp->qplib_qp.sig_type = true;
1042 
1043 	/* Shadow QP SQ depth should be the same as the QP1 RQ depth */
1044 	qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
1045 	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1046 	qp->qplib_qp.sq.max_sge = 2;
1047 	/* Q full delta can be 1 since it is an internal QP */
1048 	qp->qplib_qp.sq.q_full_delta = 1;
1049 	qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
1050 	qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;
1051 
1052 	qp->qplib_qp.scq = qp1_qp->scq;
1053 	qp->qplib_qp.rcq = qp1_qp->rcq;
1054 
1055 	qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
1056 	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1057 	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1058 	/* Q full delta can be 1 since it is an internal QP */
1059 	qp->qplib_qp.rq.q_full_delta = 1;
1060 	qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
1061 	qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
1062 
1063 	qp->qplib_qp.mtu = qp1_qp->mtu;
1064 
1065 	qp->qplib_qp.sq_hdr_buf_size = 0;
1066 	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1067 	qp->qplib_qp.dpi = &rdev->dpi_privileged;
1068 
1069 	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1070 	if (rc)
1071 		goto fail;
1072 
1073 	spin_lock_init(&qp->sq_lock);
1074 	INIT_LIST_HEAD(&qp->list);
1075 	mutex_lock(&rdev->qp_lock);
1076 	list_add_tail(&qp->list, &rdev->qp_list);
1077 	atomic_inc(&rdev->qp_count);
1078 	mutex_unlock(&rdev->qp_lock);
1079 	return qp;
1080 fail:
1081 	kfree(qp);
1082 	return NULL;
1083 }
1084 
1085 static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
1086 				struct ib_qp_init_attr *init_attr)
1087 {
1088 	struct bnxt_qplib_dev_attr *dev_attr;
1089 	struct bnxt_qplib_qp *qplqp;
1090 	struct bnxt_re_dev *rdev;
1091 	struct bnxt_qplib_q *rq;
1092 	int entries;
1093 
1094 	rdev = qp->rdev;
1095 	qplqp = &qp->qplib_qp;
1096 	rq = &qplqp->rq;
1097 	dev_attr = &rdev->dev_attr;
1098 
1099 	if (init_attr->srq) {
1100 		struct bnxt_re_srq *srq;
1101 
1102 		srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
1103 		if (!srq) {
1104 			ibdev_err(&rdev->ibdev, "SRQ not found");
1105 			return -EINVAL;
1106 		}
1107 		qplqp->srq = &srq->qplib_srq;
1108 		rq->max_wqe = 0;
1109 	} else {
1110 		rq->max_sge = init_attr->cap.max_recv_sge;
1111 		if (rq->max_sge > dev_attr->max_qp_sges)
1112 			rq->max_sge = dev_attr->max_qp_sges;
1113 		init_attr->cap.max_recv_sge = rq->max_sge;
1114 		rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
1115 						       dev_attr->max_qp_sges);
1116 		/* Allocate 1 more than what's provided so posting max doesn't
1117 		 * mean empty.
1118 		 */
1119 		entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
1120 		rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1121 		rq->q_full_delta = 0;
1122 		rq->sg_info.pgsize = PAGE_SIZE;
1123 		rq->sg_info.pgshft = PAGE_SHIFT;
1124 	}
1125 
1126 	return 0;
1127 }
1128 
1129 static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
1130 {
1131 	struct bnxt_qplib_dev_attr *dev_attr;
1132 	struct bnxt_qplib_qp *qplqp;
1133 	struct bnxt_re_dev *rdev;
1134 
1135 	rdev = qp->rdev;
1136 	qplqp = &qp->qplib_qp;
1137 	dev_attr = &rdev->dev_attr;
1138 
1139 	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
1140 		qplqp->rq.max_sge = dev_attr->max_qp_sges;
1141 		if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
1142 			qplqp->rq.max_sge = dev_attr->max_qp_sges;
1143 		qplqp->rq.max_sge = 6;
1144 	}
1145 }
1146 
1147 static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
1148 				struct ib_qp_init_attr *init_attr,
1149 				struct ib_udata *udata)
1150 {
1151 	struct bnxt_qplib_dev_attr *dev_attr;
1152 	struct bnxt_qplib_qp *qplqp;
1153 	struct bnxt_re_dev *rdev;
1154 	struct bnxt_qplib_q *sq;
1155 	int entries;
1156 	int diff;
1157 	int rc;
1158 
1159 	rdev = qp->rdev;
1160 	qplqp = &qp->qplib_qp;
1161 	sq = &qplqp->sq;
1162 	dev_attr = &rdev->dev_attr;
1163 
1164 	sq->max_sge = init_attr->cap.max_send_sge;
1165 	if (sq->max_sge > dev_attr->max_qp_sges) {
1166 		sq->max_sge = dev_attr->max_qp_sges;
1167 		init_attr->cap.max_send_sge = sq->max_sge;
1168 	}
1169 
1170 	rc = bnxt_re_setup_swqe_size(qp, init_attr);
1171 	if (rc)
1172 		return rc;
1173 
1174 	entries = init_attr->cap.max_send_wr;
1175 	/* Allocate BNXT_QPLIB_RESERVED_QP_WRS + 1 more than what's provided */
1176 	diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
1177 		0 : BNXT_QPLIB_RESERVED_QP_WRS;
1178 	entries = roundup_pow_of_two(entries + diff + 1);
1179 	sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
1180 	sq->q_full_delta = diff + 1;
1181 	/*
1182 	 * Reserve one slot for the phantom WQE. The application can then
1183 	 * post one extra entry, but this is allowed in order to avoid an
1184 	 * unexpected queue-full condition.
1185 	 */
1186 	qplqp->sq.q_full_delta -= 1;
1187 	qplqp->sq.sg_info.pgsize = PAGE_SIZE;
1188 	qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
1189 
1190 	return 0;
1191 }
1192 
1193 static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
1194 				       struct ib_qp_init_attr *init_attr)
1195 {
1196 	struct bnxt_qplib_dev_attr *dev_attr;
1197 	struct bnxt_qplib_qp *qplqp;
1198 	struct bnxt_re_dev *rdev;
1199 	int entries;
1200 
1201 	rdev = qp->rdev;
1202 	qplqp = &qp->qplib_qp;
1203 	dev_attr = &rdev->dev_attr;
1204 
1205 	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
1206 		entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
1207 		qplqp->sq.max_wqe = min_t(u32, entries,
1208 					  dev_attr->max_qp_wqes + 1);
1209 		qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
1210 			init_attr->cap.max_send_wr;
1211 		qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
1212 		if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
1213 			qplqp->sq.max_sge = dev_attr->max_qp_sges;
1214 	}
1215 }
1216 
1217 static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
1218 				struct ib_qp_init_attr *init_attr)
1219 {
1220 	struct bnxt_qplib_chip_ctx *chip_ctx;
1221 	int qptype;
1222 
1223 	chip_ctx = rdev->chip_ctx;
1224 
1225 	qptype = __from_ib_qp_type(init_attr->qp_type);
1226 	if (qptype == IB_QPT_MAX) {
1227 		ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
1228 		qptype = -EOPNOTSUPP;
1229 		goto out;
1230 	}
1231 
1232 	if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
1233 	    init_attr->qp_type == IB_QPT_GSI)
1234 		qptype = CMDQ_CREATE_QP_TYPE_GSI;
1235 out:
1236 	return qptype;
1237 }
1238 
1239 static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1240 				struct ib_qp_init_attr *init_attr,
1241 				struct ib_udata *udata)
1242 {
1243 	struct bnxt_qplib_dev_attr *dev_attr;
1244 	struct bnxt_qplib_qp *qplqp;
1245 	struct bnxt_re_dev *rdev;
1246 	struct bnxt_re_cq *cq;
1247 	int rc = 0, qptype;
1248 
1249 	rdev = qp->rdev;
1250 	qplqp = &qp->qplib_qp;
1251 	dev_attr = &rdev->dev_attr;
1252 
1253 	/* Setup misc params */
1254 	ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
1255 	qplqp->pd = &pd->qplib_pd;
1256 	qplqp->qp_handle = (u64)qplqp;
1257 	qplqp->max_inline_data = init_attr->cap.max_inline_data;
1258 	qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
1259 			    true : false);
1260 	qptype = bnxt_re_init_qp_type(rdev, init_attr);
1261 	if (qptype < 0) {
1262 		rc = qptype;
1263 		goto out;
1264 	}
1265 	qplqp->type = (u8)qptype;
1266 	qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;
1267 
1268 	if (init_attr->qp_type == IB_QPT_RC) {
1269 		qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
1270 		qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1271 	}
1272 	qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1273 	qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
1274 	if (init_attr->create_flags) {
1275 		ibdev_dbg(&rdev->ibdev,
1276 			  "QP create flags 0x%x not supported",
1277 			  init_attr->create_flags);
1278 		return -EOPNOTSUPP;
1279 	}
1280 
1281 	/* Setup CQs */
1282 	if (init_attr->send_cq) {
1283 		cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
1284 		if (!cq) {
1285 			ibdev_err(&rdev->ibdev, "Send CQ not found");
1286 			rc = -EINVAL;
1287 			goto out;
1288 		}
1289 		qplqp->scq = &cq->qplib_cq;
1290 		qp->scq = cq;
1291 	}
1292 
1293 	if (init_attr->recv_cq) {
1294 		cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
1295 		if (!cq) {
1296 			ibdev_err(&rdev->ibdev, "Receive CQ not found");
1297 			rc = -EINVAL;
1298 			goto out;
1299 		}
1300 		qplqp->rcq = &cq->qplib_cq;
1301 		qp->rcq = cq;
1302 	}
1303 
1304 	/* Setup RQ/SRQ */
1305 	rc = bnxt_re_init_rq_attr(qp, init_attr);
1306 	if (rc)
1307 		goto out;
1308 	if (init_attr->qp_type == IB_QPT_GSI)
1309 		bnxt_re_adjust_gsi_rq_attr(qp);
1310 
1311 	/* Setup SQ */
1312 	rc = bnxt_re_init_sq_attr(qp, init_attr, udata);
1313 	if (rc)
1314 		goto out;
1315 	if (init_attr->qp_type == IB_QPT_GSI)
1316 		bnxt_re_adjust_gsi_sq_attr(qp, init_attr);
1317 
1318 	if (udata) /* This will update DPI and qp_handle */
1319 		rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1320 out:
1321 	return rc;
1322 }
1323 
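/* Allocate the shadow GSI resources: a table of shadow QP entries, the
 * shadow QP itself and its AH. This is only used on non gen-P5 devices.
 */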
1324 static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
1325 				     struct bnxt_re_pd *pd)
1326 {
1327 	struct bnxt_re_sqp_entries *sqp_tbl = NULL;
1328 	struct bnxt_re_dev *rdev;
1329 	struct bnxt_re_qp *sqp;
1330 	struct bnxt_re_ah *sah;
1331 	int rc = 0;
1332 
1333 	rdev = qp->rdev;
1334 	/* Create a shadow QP to handle the QP1 traffic */
1335 	sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
1336 			  GFP_KERNEL);
1337 	if (!sqp_tbl)
1338 		return -ENOMEM;
1339 	rdev->gsi_ctx.sqp_tbl = sqp_tbl;
1340 
1341 	sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
1342 	if (!sqp) {
1343 		rc = -ENODEV;
1344 		ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
1345 		goto out;
1346 	}
1347 	rdev->gsi_ctx.gsi_sqp = sqp;
1348 
1349 	sqp->rcq = qp->rcq;
1350 	sqp->scq = qp->scq;
1351 	sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1352 					  &qp->qplib_qp);
1353 	if (!sah) {
1354 		bnxt_qplib_destroy_qp(&rdev->qplib_res,
1355 				      &sqp->qplib_qp);
1356 		rc = -ENODEV;
1357 		ibdev_err(&rdev->ibdev,
1358 			  "Failed to create AH entry for ShadowQP");
1359 		goto out;
1360 	}
1361 	rdev->gsi_ctx.gsi_sah = sah;
1362 
1363 	return 0;
1364 out:
1365 	kfree(sqp_tbl);
1366 	return rc;
1367 }
1368 
1369 static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1370 				 struct ib_qp_init_attr *init_attr)
1371 {
1372 	struct bnxt_re_dev *rdev;
1373 	struct bnxt_qplib_qp *qplqp;
1374 	int rc = 0;
1375 
1376 	rdev = qp->rdev;
1377 	qplqp = &qp->qplib_qp;
1378 
1379 	qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1380 	qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1381 
1382 	rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
1383 	if (rc) {
1384 		ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
1385 		goto out;
1386 	}
1387 
1388 	rc = bnxt_re_create_shadow_gsi(qp, pd);
1389 out:
1390 	return rc;
1391 }
1392 
1393 static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
1394 				   struct ib_qp_init_attr *init_attr,
1395 				   struct bnxt_qplib_dev_attr *dev_attr)
1396 {
1397 	bool rc = true;
1398 
1399 	if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
1400 	    init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
1401 	    init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
1402 	    init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
1403 	    init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
1404 		ibdev_err(&rdev->ibdev,
1405 			  "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
1406 			  init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
1407 			  init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
1408 			  init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
1409 			  init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
1410 			  init_attr->cap.max_inline_data,
1411 			  dev_attr->max_inline_data);
1412 		rc = false;
1413 	}
1414 	return rc;
1415 }
1416 
1417 struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1418 				struct ib_qp_init_attr *qp_init_attr,
1419 				struct ib_udata *udata)
1420 {
1421 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1422 	struct bnxt_re_dev *rdev = pd->rdev;
1423 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1424 	struct bnxt_re_qp *qp;
1425 	int rc;
1426 
1427 	rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
1428 	if (!rc) {
1429 		rc = -EINVAL;
1430 		goto exit;
1431 	}
1432 
1433 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1434 	if (!qp) {
1435 		rc = -ENOMEM;
1436 		goto exit;
1437 	}
1438 	qp->rdev = rdev;
1439 	rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
1440 	if (rc)
1441 		goto fail;
1442 
1443 	if (qp_init_attr->qp_type == IB_QPT_GSI &&
1444 	    !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
1445 		rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
1446 		if (rc == -ENODEV)
1447 			goto qp_destroy;
1448 		if (rc)
1449 			goto fail;
1450 	} else {
1451 		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1452 		if (rc) {
1453 			ibdev_err(&rdev->ibdev, "Failed to create HW QP");
1454 			goto free_umem;
1455 		}
1456 		if (udata) {
1457 			struct bnxt_re_qp_resp resp;
1458 
1459 			resp.qpid = qp->qplib_qp.id;
1460 			resp.rsvd = 0;
1461 			rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1462 			if (rc) {
1463 				ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
1464 				goto qp_destroy;
1465 			}
1466 		}
1467 	}
1468 
1469 	qp->ib_qp.qp_num = qp->qplib_qp.id;
1470 	if (qp_init_attr->qp_type == IB_QPT_GSI)
1471 		rdev->gsi_ctx.gsi_qp = qp;
1472 	spin_lock_init(&qp->sq_lock);
1473 	spin_lock_init(&qp->rq_lock);
1474 	INIT_LIST_HEAD(&qp->list);
1475 	mutex_lock(&rdev->qp_lock);
1476 	list_add_tail(&qp->list, &rdev->qp_list);
1477 	mutex_unlock(&rdev->qp_lock);
1478 	atomic_inc(&rdev->qp_count);
1479 
1480 	return &qp->ib_qp;
1481 qp_destroy:
1482 	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1483 free_umem:
1484 	ib_umem_release(qp->rumem);
1485 	ib_umem_release(qp->sumem);
1486 fail:
1487 	kfree(qp);
1488 exit:
1489 	return ERR_PTR(rc);
1490 }
1491 
1492 static u8 __from_ib_qp_state(enum ib_qp_state state)
1493 {
1494 	switch (state) {
1495 	case IB_QPS_RESET:
1496 		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1497 	case IB_QPS_INIT:
1498 		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1499 	case IB_QPS_RTR:
1500 		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1501 	case IB_QPS_RTS:
1502 		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1503 	case IB_QPS_SQD:
1504 		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1505 	case IB_QPS_SQE:
1506 		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1507 	case IB_QPS_ERR:
1508 	default:
1509 		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1510 	}
1511 }
1512 
1513 static enum ib_qp_state __to_ib_qp_state(u8 state)
1514 {
1515 	switch (state) {
1516 	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1517 		return IB_QPS_RESET;
1518 	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1519 		return IB_QPS_INIT;
1520 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1521 		return IB_QPS_RTR;
1522 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1523 		return IB_QPS_RTS;
1524 	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1525 		return IB_QPS_SQD;
1526 	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1527 		return IB_QPS_SQE;
1528 	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1529 	default:
1530 		return IB_QPS_ERR;
1531 	}
1532 }
1533 
1534 static u32 __from_ib_mtu(enum ib_mtu mtu)
1535 {
1536 	switch (mtu) {
1537 	case IB_MTU_256:
1538 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1539 	case IB_MTU_512:
1540 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1541 	case IB_MTU_1024:
1542 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1543 	case IB_MTU_2048:
1544 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1545 	case IB_MTU_4096:
1546 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1547 	default:
1548 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1549 	}
1550 }
1551 
1552 static enum ib_mtu __to_ib_mtu(u32 mtu)
1553 {
1554 	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1555 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1556 		return IB_MTU_256;
1557 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1558 		return IB_MTU_512;
1559 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1560 		return IB_MTU_1024;
1561 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1562 		return IB_MTU_2048;
1563 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1564 		return IB_MTU_4096;
1565 	default:
1566 		return IB_MTU_2048;
1567 	}
1568 }
1569 
1570 /* Shared Receive Queues */
1571 int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
1572 {
1573 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1574 					       ib_srq);
1575 	struct bnxt_re_dev *rdev = srq->rdev;
1576 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1577 	struct bnxt_qplib_nq *nq = NULL;
1578 
1579 	if (qplib_srq->cq)
1580 		nq = qplib_srq->cq->nq;
1581 	bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1582 	ib_umem_release(srq->umem);
1583 	atomic_dec(&rdev->srq_count);
1584 	if (nq)
1585 		nq->budget--;
1586 	return 0;
1587 }
1588 
1589 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1590 				 struct bnxt_re_pd *pd,
1591 				 struct bnxt_re_srq *srq,
1592 				 struct ib_udata *udata)
1593 {
1594 	struct bnxt_re_srq_req ureq;
1595 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1596 	struct ib_umem *umem;
1597 	int bytes = 0;
1598 	struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1599 		udata, struct bnxt_re_ucontext, ib_uctx);
1600 
1601 	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1602 		return -EFAULT;
1603 
1604 	bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
1605 	bytes = PAGE_ALIGN(bytes);
1606 	umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
1607 			   IB_ACCESS_LOCAL_WRITE);
1608 	if (IS_ERR(umem))
1609 		return PTR_ERR(umem);
1610 
1611 	srq->umem = umem;
1612 	qplib_srq->sg_info.umem = umem;
1613 	qplib_srq->sg_info.pgsize = PAGE_SIZE;
1614 	qplib_srq->sg_info.pgshft = PAGE_SHIFT;
1615 	qplib_srq->srq_handle = ureq.srq_handle;
1616 	qplib_srq->dpi = &cntx->dpi;
1617 
1618 	return 0;
1619 }
1620 
1621 int bnxt_re_create_srq(struct ib_srq *ib_srq,
1622 		       struct ib_srq_init_attr *srq_init_attr,
1623 		       struct ib_udata *udata)
1624 {
1625 	struct bnxt_qplib_dev_attr *dev_attr;
1626 	struct bnxt_qplib_nq *nq = NULL;
1627 	struct bnxt_re_dev *rdev;
1628 	struct bnxt_re_srq *srq;
1629 	struct bnxt_re_pd *pd;
1630 	struct ib_pd *ib_pd;
1631 	int rc, entries;
1632 
1633 	ib_pd = ib_srq->pd;
1634 	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1635 	rdev = pd->rdev;
1636 	dev_attr = &rdev->dev_attr;
1637 	srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
1638 
1639 	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1640 		ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
1641 		rc = -EINVAL;
1642 		goto exit;
1643 	}
1644 
1645 	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1646 		rc = -EOPNOTSUPP;
1647 		goto exit;
1648 	}
1649 
1650 	srq->rdev = rdev;
1651 	srq->qplib_srq.pd = &pd->qplib_pd;
1652 	srq->qplib_srq.dpi = &rdev->dpi_privileged;
1653 	/* Allocate 1 more than what's provided so posting max doesn't
1654 	 * mean empty
1655 	 */
1656 	entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1657 	if (entries > dev_attr->max_srq_wqes + 1)
1658 		entries = dev_attr->max_srq_wqes + 1;
1659 	srq->qplib_srq.max_wqe = entries;
1660 
1661 	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1662 	/* 128-byte WQE size for SRQ, so use the max SGEs */
1663 	srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
1664 	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1665 	srq->srq_limit = srq_init_attr->attr.srq_limit;
1666 	srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1667 	nq = &rdev->nq[0];
1668 
1669 	if (udata) {
1670 		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1671 		if (rc)
1672 			goto fail;
1673 	}
1674 
1675 	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1676 	if (rc) {
1677 		ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
1678 		goto fail;
1679 	}
1680 
1681 	if (udata) {
1682 		struct bnxt_re_srq_resp resp;
1683 
1684 		resp.srqid = srq->qplib_srq.id;
1685 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1686 		if (rc) {
1687 			ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
1688 			bnxt_qplib_destroy_srq(&rdev->qplib_res,
1689 					       &srq->qplib_srq);
1690 			goto fail;
1691 		}
1692 	}
1693 	if (nq)
1694 		nq->budget++;
1695 	atomic_inc(&rdev->srq_count);
1696 
1697 	return 0;
1698 
1699 fail:
1700 	ib_umem_release(srq->umem);
1701 exit:
1702 	return rc;
1703 }
1704 
1705 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1706 		       enum ib_srq_attr_mask srq_attr_mask,
1707 		       struct ib_udata *udata)
1708 {
1709 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1710 					       ib_srq);
1711 	struct bnxt_re_dev *rdev = srq->rdev;
1712 	int rc;
1713 
1714 	switch (srq_attr_mask) {
1715 	case IB_SRQ_MAX_WR:
1716 		/* SRQ resize is not supported */
1717 		break;
1718 	case IB_SRQ_LIMIT:
1719 		/* Change the SRQ threshold */
1720 		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1721 			return -EINVAL;
1722 
1723 		srq->qplib_srq.threshold = srq_attr->srq_limit;
1724 		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1725 		if (rc) {
1726 			ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
1727 			return rc;
1728 		}
1729 		/* On success, update the shadow */
1730 		srq->srq_limit = srq_attr->srq_limit;
1731 		/* No need to build and send a response back to udata */
1732 		break;
1733 	default:
1734 		ibdev_err(&rdev->ibdev,
1735 			  "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1736 		return -EINVAL;
1737 	}
1738 	return 0;
1739 }
1740 
1741 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1742 {
1743 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1744 					       ib_srq);
1745 	struct bnxt_re_srq tsrq;
1746 	struct bnxt_re_dev *rdev = srq->rdev;
1747 	int rc;
1748 
1749 	/* Get live SRQ attr */
1750 	tsrq.qplib_srq.id = srq->qplib_srq.id;
1751 	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1752 	if (rc) {
1753 		ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
1754 		return rc;
1755 	}
1756 	srq_attr->max_wr = srq->qplib_srq.max_wqe;
1757 	srq_attr->max_sge = srq->qplib_srq.max_sge;
1758 	srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1759 
1760 	return 0;
1761 }
1762 
1763 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1764 			  const struct ib_recv_wr **bad_wr)
1765 {
1766 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1767 					       ib_srq);
1768 	struct bnxt_qplib_swqe wqe;
1769 	unsigned long flags;
1770 	int rc = 0;
1771 
1772 	spin_lock_irqsave(&srq->lock, flags);
1773 	while (wr) {
1774 		/* Transcribe each ib_recv_wr to qplib_swqe */
1775 		wqe.num_sge = wr->num_sge;
1776 		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1777 		wqe.wr_id = wr->wr_id;
1778 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1779 
1780 		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1781 		if (rc) {
1782 			*bad_wr = wr;
1783 			break;
1784 		}
1785 		wr = wr->next;
1786 	}
1787 	spin_unlock_irqrestore(&srq->lock, flags);
1788 
1789 	return rc;
1790 }
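
/* Mirror relevant QP1 attribute changes (state, PKEY index, QKEY and
 * SQ PSN) onto the shadow QP used to relay GSI traffic.
 */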
1791 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1792 				    struct bnxt_re_qp *qp1_qp,
1793 				    int qp_attr_mask)
1794 {
1795 	struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
1796 	int rc = 0;
1797 
1798 	if (qp_attr_mask & IB_QP_STATE) {
1799 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1800 		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1801 	}
1802 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1803 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1804 		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1805 	}
1806 
1807 	if (qp_attr_mask & IB_QP_QKEY) {
1808 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1809 		/* Using a random QKEY */
1810 		qp->qplib_qp.qkey = 0x81818181;
1811 	}
1812 	if (qp_attr_mask & IB_QP_SQ_PSN) {
1813 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1814 		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1815 	}
1816 
1817 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1818 	if (rc)
1819 		ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
1820 	return rc;
1821 }
1822 
1823 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1824 		      int qp_attr_mask, struct ib_udata *udata)
1825 {
1826 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1827 	struct bnxt_re_dev *rdev = qp->rdev;
1828 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1829 	enum ib_qp_state curr_qp_state, new_qp_state;
1830 	int rc, entries;
1831 	unsigned int flags;
1832 	u8 nw_type;
1833 
1834 	if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1835 		return -EOPNOTSUPP;
1836 
1837 	qp->qplib_qp.modify_flags = 0;
1838 	if (qp_attr_mask & IB_QP_STATE) {
1839 		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1840 		new_qp_state = qp_attr->qp_state;
1841 		if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1842 					ib_qp->qp_type, qp_attr_mask)) {
1843 			ibdev_err(&rdev->ibdev,
1844 				  "Invalid attribute mask: %#x specified ",
1845 				  qp_attr_mask);
1846 			ibdev_err(&rdev->ibdev,
1847 				  "for qpn: %#x type: %#x",
1848 				  ib_qp->qp_num, ib_qp->qp_type);
1849 			ibdev_err(&rdev->ibdev,
1850 				  "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1851 				  curr_qp_state, new_qp_state);
1852 			return -EINVAL;
1853 		}
1854 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1855 		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1856 
1857 		if (!qp->sumem &&
1858 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1859 			ibdev_dbg(&rdev->ibdev,
1860 				  "Move QP = %p to flush list\n", qp);
1861 			flags = bnxt_re_lock_cqs(qp);
1862 			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1863 			bnxt_re_unlock_cqs(qp, flags);
1864 		}
1865 		if (!qp->sumem &&
1866 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1867 			ibdev_dbg(&rdev->ibdev,
1868 				  "Move QP = %p out of flush list\n", qp);
1869 			flags = bnxt_re_lock_cqs(qp);
1870 			bnxt_qplib_clean_qp(&qp->qplib_qp);
1871 			bnxt_re_unlock_cqs(qp, flags);
1872 		}
1873 	}
1874 	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1875 		qp->qplib_qp.modify_flags |=
1876 				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1877 		qp->qplib_qp.en_sqd_async_notify = true;
1878 	}
1879 	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1880 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1881 		qp->qplib_qp.access =
1882 			__from_ib_access_flags(qp_attr->qp_access_flags);
1883 		/* LOCAL_WRITE access must be set to allow RC receive */
1884 		qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1885 		/* Temp: Set all params on QP as of now */
1886 		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
1887 		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
1888 	}
1889 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1890 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1891 		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1892 	}
1893 	if (qp_attr_mask & IB_QP_QKEY) {
1894 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1895 		qp->qplib_qp.qkey = qp_attr->qkey;
1896 	}
1897 	if (qp_attr_mask & IB_QP_AV) {
1898 		const struct ib_global_route *grh =
1899 			rdma_ah_read_grh(&qp_attr->ah_attr);
1900 		const struct ib_gid_attr *sgid_attr;
1901 		struct bnxt_re_gid_ctx *ctx;
1902 
1903 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1904 				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1905 				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1906 				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1907 				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1908 				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1909 				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1910 		memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1911 		       sizeof(qp->qplib_qp.ah.dgid.data));
1912 		qp->qplib_qp.ah.flow_label = grh->flow_label;
1913 		sgid_attr = grh->sgid_attr;
1914 		/* Get the HW context of the GID. The reference
1915 		 * of GID table entry is already taken by the caller.
1916 		 */
1917 		ctx = rdma_read_gid_hw_context(sgid_attr);
1918 		qp->qplib_qp.ah.sgid_index = ctx->idx;
1919 		qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1920 		qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1921 		qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1922 		qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1923 		ether_addr_copy(qp->qplib_qp.ah.dmac,
1924 				qp_attr->ah_attr.roce.dmac);
1925 
1926 		rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
1927 					     &qp->qplib_qp.smac[0]);
1928 		if (rc)
1929 			return rc;
1930 
1931 		nw_type = rdma_gid_attr_network_type(sgid_attr);
1932 		switch (nw_type) {
1933 		case RDMA_NETWORK_IPV4:
1934 			qp->qplib_qp.nw_type =
1935 				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1936 			break;
1937 		case RDMA_NETWORK_IPV6:
1938 			qp->qplib_qp.nw_type =
1939 				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1940 			break;
1941 		default:
1942 			qp->qplib_qp.nw_type =
1943 				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1944 			break;
1945 		}
1946 	}
1947 
1948 	if (qp_attr_mask & IB_QP_PATH_MTU) {
1949 		qp->qplib_qp.modify_flags |=
1950 				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1951 		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1952 		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1953 	} else if (qp_attr->qp_state == IB_QPS_RTR) {
1954 		qp->qplib_qp.modify_flags |=
1955 			CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1956 		qp->qplib_qp.path_mtu =
1957 			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1958 		qp->qplib_qp.mtu =
1959 			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1960 	}
1961 
1962 	if (qp_attr_mask & IB_QP_TIMEOUT) {
1963 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1964 		qp->qplib_qp.timeout = qp_attr->timeout;
1965 	}
1966 	if (qp_attr_mask & IB_QP_RETRY_CNT) {
1967 		qp->qplib_qp.modify_flags |=
1968 				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1969 		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1970 	}
1971 	if (qp_attr_mask & IB_QP_RNR_RETRY) {
1972 		qp->qplib_qp.modify_flags |=
1973 				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1974 		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1975 	}
1976 	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1977 		qp->qplib_qp.modify_flags |=
1978 				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1979 		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1980 	}
1981 	if (qp_attr_mask & IB_QP_RQ_PSN) {
1982 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1983 		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1984 	}
1985 	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1986 		qp->qplib_qp.modify_flags |=
1987 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1988 		/* Cap the max_rd_atomic to device max */
1989 		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1990 						   dev_attr->max_qp_rd_atom);
1991 	}
1992 	if (qp_attr_mask & IB_QP_SQ_PSN) {
1993 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1994 		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1995 	}
1996 	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1997 		if (qp_attr->max_dest_rd_atomic >
1998 		    dev_attr->max_qp_init_rd_atom) {
1999 			ibdev_err(&rdev->ibdev,
2000 				  "max_dest_rd_atomic requested%d is > dev_max%d",
2001 				  qp_attr->max_dest_rd_atomic,
2002 				  dev_attr->max_qp_init_rd_atom);
2003 			return -EINVAL;
2004 		}
2005 
2006 		qp->qplib_qp.modify_flags |=
2007 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
2008 		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
2009 	}
2010 	if (qp_attr_mask & IB_QP_CAP) {
2011 		qp->qplib_qp.modify_flags |=
2012 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
2013 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
2014 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
2015 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
2016 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
2017 		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2018 		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2019 		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2020 		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2021 		    (qp_attr->cap.max_inline_data >=
2022 						dev_attr->max_inline_data)) {
2023 			ibdev_err(&rdev->ibdev,
2024 				  "Create QP failed - max exceeded");
2025 			return -EINVAL;
2026 		}
2027 		entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
2028 		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
2029 						dev_attr->max_qp_wqes + 1);
2030 		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2031 						qp_attr->cap.max_send_wr;
2032 		/*
2033 		 * Reserve one slot for the phantom WQE. Some applications
2034 		 * can post one extra entry in this case; allow for it to
2035 		 * avoid an unexpected queue-full condition.
2036 		 */
2037 		qp->qplib_qp.sq.q_full_delta -= 1;
2038 		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2039 		if (qp->qplib_qp.rq.max_wqe) {
2040 			entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
2041 			qp->qplib_qp.rq.max_wqe =
2042 				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
2043 			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2044 						       qp_attr->cap.max_recv_wr;
2045 			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2046 		} else {
2047 			/* SRQ was used prior, just ignore the RQ caps */
2048 		}
2049 	}
2050 	if (qp_attr_mask & IB_QP_DEST_QPN) {
2051 		qp->qplib_qp.modify_flags |=
2052 				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
2053 		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2054 	}
2055 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2056 	if (rc) {
2057 		ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
2058 		return rc;
2059 	}
2060 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
2061 		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2062 	return rc;
2063 }
2064 
2065 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2066 		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2067 {
2068 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2069 	struct bnxt_re_dev *rdev = qp->rdev;
2070 	struct bnxt_qplib_qp *qplib_qp;
2071 	int rc;
2072 
2073 	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
2074 	if (!qplib_qp)
2075 		return -ENOMEM;
2076 
2077 	qplib_qp->id = qp->qplib_qp.id;
2078 	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2079 
2080 	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2081 	if (rc) {
2082 		ibdev_err(&rdev->ibdev, "Failed to query HW QP");
2083 		goto out;
2084 	}
2085 	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2086 	qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2087 	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2088 	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2089 	qp_attr->pkey_index = qplib_qp->pkey_index;
2090 	qp_attr->qkey = qplib_qp->qkey;
2091 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2092 	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
2093 			qplib_qp->ah.host_sgid_index,
2094 			qplib_qp->ah.hop_limit,
2095 			qplib_qp->ah.traffic_class);
2096 	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
2097 	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
2098 	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
2099 	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2100 	qp_attr->timeout = qplib_qp->timeout;
2101 	qp_attr->retry_cnt = qplib_qp->retry_cnt;
2102 	qp_attr->rnr_retry = qplib_qp->rnr_retry;
2103 	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2104 	qp_attr->rq_psn = qplib_qp->rq.psn;
2105 	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2106 	qp_attr->sq_psn = qplib_qp->sq.psn;
2107 	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2108 	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2109 							 IB_SIGNAL_REQ_WR;
2110 	qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2111 
2112 	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2113 	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2114 	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2115 	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2116 	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2117 	qp_init_attr->cap = qp_attr->cap;
2118 
2119 out:
2120 	kfree(qplib_qp);
2121 	return rc;
2122 }
2123 
2124 /* Routine for sending QP1 packets for RoCE V1 and V2
2125  */
2126 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
2127 				     const struct ib_send_wr *wr,
2128 				     struct bnxt_qplib_swqe *wqe,
2129 				     int payload_size)
2130 {
2131 	struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
2132 					     ib_ah);
2133 	struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2134 	const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
2135 	struct bnxt_qplib_sge sge;
2136 	u8 nw_type;
2137 	u16 ether_type;
2138 	union ib_gid dgid;
2139 	bool is_eth = false;
2140 	bool is_vlan = false;
2141 	bool is_grh = false;
2142 	bool is_udp = false;
2143 	u8 ip_version = 0;
2144 	u16 vlan_id = 0xFFFF;
2145 	void *buf;
2146 	int i, rc = 0;
2147 
2148 	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2149 
2150 	rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2151 	if (rc)
2152 		return rc;
2153 
2154 	/* Get network header type for this GID */
2155 	nw_type = rdma_gid_attr_network_type(sgid_attr);
2156 	switch (nw_type) {
2157 	case RDMA_NETWORK_IPV4:
2158 		nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
2159 		break;
2160 	case RDMA_NETWORK_IPV6:
2161 		nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
2162 		break;
2163 	default:
2164 		nw_type = BNXT_RE_ROCE_V1_PACKET;
2165 		break;
2166 	}
2167 	memcpy(&dgid.raw, &qplib_ah->dgid, 16);
2168 	is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2169 	if (is_udp) {
2170 		if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
2171 			ip_version = 4;
2172 			ether_type = ETH_P_IP;
2173 		} else {
2174 			ip_version = 6;
2175 			ether_type = ETH_P_IPV6;
2176 		}
2177 		is_grh = false;
2178 	} else {
2179 		ether_type = ETH_P_IBOE;
2180 		is_grh = true;
2181 	}
2182 
2183 	is_eth = true;
2184 	is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
2185 
2186 	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
2187 			  ip_version, is_udp, 0, &qp->qp1_hdr);
2188 
2189 	/* ETH */
2190 	ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
2191 	ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
2192 
2193 	/* For vlan, check the sgid for vlan existence */
2194 
2195 	if (!is_vlan) {
2196 		qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
2197 	} else {
2198 		qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
2199 		qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
2200 	}
2201 
2202 	if (is_grh || (ip_version == 6)) {
2203 		memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
2204 		       sizeof(sgid_attr->gid));
2205 		memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2206 		       sizeof(sgid_attr->gid));
2207 		qp->qp1_hdr.grh.hop_limit     = qplib_ah->hop_limit;
2208 	}
2209 
2210 	if (ip_version == 4) {
2211 		qp->qp1_hdr.ip4.tos = 0;
2212 		qp->qp1_hdr.ip4.id = 0;
2213 		qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
2214 		qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
2215 
2216 		memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
2217 		memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2218 		qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2219 	}
2220 
2221 	if (is_udp) {
2222 		qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2223 		qp->qp1_hdr.udp.sport = htons(0x8CD1);
2224 		qp->qp1_hdr.udp.csum = 0;
2225 	}
2226 
2227 	/* BTH */
2228 	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2229 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2230 		qp->qp1_hdr.immediate_present = 1;
2231 	} else {
2232 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2233 	}
2234 	if (wr->send_flags & IB_SEND_SOLICITED)
2235 		qp->qp1_hdr.bth.solicited_event = 1;
2236 	/* pad_count */
2237 	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2238 
2239 	/* P_key for QP1 is for all members */
2240 	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2241 	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2242 	qp->qp1_hdr.bth.ack_req = 0;
2243 	qp->send_psn++;
2244 	qp->send_psn &= BTH_PSN_MASK;
2245 	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2246 	/* DETH */
2247 	/* Use the privileged Q_Key for QP1 */
2248 	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2249 	qp->qp1_hdr.deth.source_qpn = IB_QP1;
2250 
2251 	/* Pack the QP1 to the transmit buffer */
2252 	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2253 	if (buf) {
2254 		ib_ud_header_pack(&qp->qp1_hdr, buf);
2255 		for (i = wqe->num_sge; i; i--) {
2256 			wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2257 			wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2258 			wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2259 		}
2260 
2261 		/*
2262 		 * Max Header buf size for IPV6 RoCE V2 is 86,
2263 		 * which is same as the QP1 SQ header buffer.
2264 		 * Header buf size for IPV4 RoCE V2 can be 66.
2265 		 * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
2266 		 * Subtract 20 bytes from QP1 SQ header buf size
2267 		 */
2268 		if (is_udp && ip_version == 4)
2269 			sge.size -= 20;
2270 		/*
2271 		 * Max Header buf size for RoCE V1 is 78.
2272 		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2273 		 * Subtract 8 bytes from QP1 SQ header buf size
2274 		 */
2275 		if (!is_udp)
2276 			sge.size -= 8;
2277 
2278 		/* Subtract 4 bytes for non vlan packets */
2279 		if (!is_vlan)
2280 			sge.size -= 4;
2281 
2282 		wqe->sg_list[0].addr = sge.addr;
2283 		wqe->sg_list[0].lkey = sge.lkey;
2284 		wqe->sg_list[0].size = sge.size;
2285 		wqe->num_sge++;
2286 
2287 	} else {
2288 		ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
2289 		rc = -ENOMEM;
2290 	}
2291 	return rc;
2292 }
2293 
2294 /* The MAD layer only provides a recv SGE the size of ib_grh + the
2295  * MAD datagram.  No Ethernet headers, Ethertype, BTH, DETH, nor
2296  * RoCE iCRC.  The Cu+ solution must provide a buffer for the entire
2297  * receive packet (334 bytes) with no VLAN and then copy the GRH
2298  * and the MAD datagram out to the provided SGE.
2299  */
2300 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2301 					    const struct ib_recv_wr *wr,
2302 					    struct bnxt_qplib_swqe *wqe,
2303 					    int payload_size)
2304 {
2305 	struct bnxt_re_sqp_entries *sqp_entry;
2306 	struct bnxt_qplib_sge ref, sge;
2307 	struct bnxt_re_dev *rdev;
2308 	u32 rq_prod_index;
2309 
2310 	rdev = qp->rdev;
2311 
2312 	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2313 
2314 	if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2315 		return -ENOMEM;
2316 
2317 	/* Create 1 SGE to receive the entire
2318 	 * ethernet packet
2319 	 */
2320 	/* Save the reference from ULP */
2321 	ref.addr = wqe->sg_list[0].addr;
2322 	ref.lkey = wqe->sg_list[0].lkey;
2323 	ref.size = wqe->sg_list[0].size;
2324 
2325 	sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
2326 
2327 	/* SGE 1 */
2328 	wqe->sg_list[0].addr = sge.addr;
2329 	wqe->sg_list[0].lkey = sge.lkey;
2330 	wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2331 	sge.size -= wqe->sg_list[0].size;
2332 
2333 	sqp_entry->sge.addr = ref.addr;
2334 	sqp_entry->sge.lkey = ref.lkey;
2335 	sqp_entry->sge.size = ref.size;
2336 	/* Store the wrid for reporting completion */
2337 	sqp_entry->wrid = wqe->wr_id;
2338 	/* Change the wqe->wr_id to the table index */
2339 	wqe->wr_id = rq_prod_index;
2340 	return 0;
2341 }
2342 
2343 static int is_ud_qp(struct bnxt_re_qp *qp)
2344 {
2345 	return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2346 		qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
2347 }
2348 
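/* Translate an ib_send_wr of the SEND family into a qplib SWQE.
 * UD and GSI QPs additionally carry the destination QPN, Q_Key and
 * AH id taken from the ud_wr.
 */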
2349 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2350 				  const struct ib_send_wr *wr,
2351 				  struct bnxt_qplib_swqe *wqe)
2352 {
2353 	struct bnxt_re_ah *ah = NULL;
2354 
2355 	if (is_ud_qp(qp)) {
2356 		ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2357 		wqe->send.q_key = ud_wr(wr)->remote_qkey;
2358 		wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2359 		wqe->send.avid = ah->qplib_ah.id;
2360 	}
2361 	switch (wr->opcode) {
2362 	case IB_WR_SEND:
2363 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2364 		break;
2365 	case IB_WR_SEND_WITH_IMM:
2366 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2367 		wqe->send.imm_data = wr->ex.imm_data;
2368 		break;
2369 	case IB_WR_SEND_WITH_INV:
2370 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2371 		wqe->send.inv_key = wr->ex.invalidate_rkey;
2372 		break;
2373 	default:
2374 		return -EINVAL;
2375 	}
2376 	if (wr->send_flags & IB_SEND_SIGNALED)
2377 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2378 	if (wr->send_flags & IB_SEND_FENCE)
2379 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2380 	if (wr->send_flags & IB_SEND_SOLICITED)
2381 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2382 	if (wr->send_flags & IB_SEND_INLINE)
2383 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2384 
2385 	return 0;
2386 }
2387 
2388 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2389 				  struct bnxt_qplib_swqe *wqe)
2390 {
2391 	switch (wr->opcode) {
2392 	case IB_WR_RDMA_WRITE:
2393 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2394 		break;
2395 	case IB_WR_RDMA_WRITE_WITH_IMM:
2396 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2397 		wqe->rdma.imm_data = wr->ex.imm_data;
2398 		break;
2399 	case IB_WR_RDMA_READ:
2400 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2401 		wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2402 		break;
2403 	default:
2404 		return -EINVAL;
2405 	}
2406 	wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2407 	wqe->rdma.r_key = rdma_wr(wr)->rkey;
2408 	if (wr->send_flags & IB_SEND_SIGNALED)
2409 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2410 	if (wr->send_flags & IB_SEND_FENCE)
2411 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2412 	if (wr->send_flags & IB_SEND_SOLICITED)
2413 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2414 	if (wr->send_flags & IB_SEND_INLINE)
2415 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2416 
2417 	return 0;
2418 }
2419 
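/* Translate an atomic ib_send_wr (compare-and-swap or fetch-and-add)
 * into a qplib SWQE, copying the remote VA, rkey and operands.
 */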
2420 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2421 				    struct bnxt_qplib_swqe *wqe)
2422 {
2423 	switch (wr->opcode) {
2424 	case IB_WR_ATOMIC_CMP_AND_SWP:
2425 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2426 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2427 		wqe->atomic.swap_data = atomic_wr(wr)->swap;
2428 		break;
2429 	case IB_WR_ATOMIC_FETCH_AND_ADD:
2430 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2431 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2432 		break;
2433 	default:
2434 		return -EINVAL;
2435 	}
2436 	wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2437 	wqe->atomic.r_key = atomic_wr(wr)->rkey;
2438 	if (wr->send_flags & IB_SEND_SIGNALED)
2439 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2440 	if (wr->send_flags & IB_SEND_FENCE)
2441 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2442 	if (wr->send_flags & IB_SEND_SOLICITED)
2443 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2444 	return 0;
2445 }
2446 
2447 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2448 				 struct bnxt_qplib_swqe *wqe)
2449 {
2450 	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2451 	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2452 
2453 	/* Need unconditional fence for local invalidate
2454 	 * opcode to work as expected.
2455 	 */
2456 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2457 
2458 	if (wr->send_flags & IB_SEND_SIGNALED)
2459 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2460 	if (wr->send_flags & IB_SEND_SOLICITED)
2461 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2462 
2463 	return 0;
2464 }
2465 
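/* Build a REG_MR SWQE from an ib_reg_wr: point the WQE at the
 * fast-register page list and translate the requested access flags.
 */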
2466 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2467 				 struct bnxt_qplib_swqe *wqe)
2468 {
2469 	struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2470 	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2471 	int access = wr->access;
2472 
2473 	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2474 	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2475 	wqe->frmr.page_list = mr->pages;
2476 	wqe->frmr.page_list_len = mr->npages;
2477 	wqe->frmr.levels = qplib_frpl->hwq.level;
2478 	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2479 
2480 	/* Need unconditional fence for reg_mr
2481 	 * opcode to function as expected.
2482 	 */
2483 
2484 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2485 
2486 	if (wr->wr.send_flags & IB_SEND_SIGNALED)
2487 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2488 
2489 	if (access & IB_ACCESS_LOCAL_WRITE)
2490 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2491 	if (access & IB_ACCESS_REMOTE_READ)
2492 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2493 	if (access & IB_ACCESS_REMOTE_WRITE)
2494 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2495 	if (access & IB_ACCESS_REMOTE_ATOMIC)
2496 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2497 	if (access & IB_ACCESS_MW_BIND)
2498 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2499 
2500 	wqe->frmr.l_key = wr->key;
2501 	wqe->frmr.length = wr->mr->length;
2502 	wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2503 	wqe->frmr.va = wr->mr->iova;
2504 	return 0;
2505 }
2506 
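/* Copy every SGE of an inline send into the SWQE's inline_data buffer.
 * Returns the total inline length, or -EINVAL if the data would exceed
 * BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH.
 */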
2507 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2508 				    const struct ib_send_wr *wr,
2509 				    struct bnxt_qplib_swqe *wqe)
2510 {
2511 	/* Copy the inline data to the data field */
2512 	u8 *in_data;
2513 	u32 i, sge_len;
2514 	void *sge_addr;
2515 
2516 	in_data = wqe->inline_data;
2517 	for (i = 0; i < wr->num_sge; i++) {
2518 		sge_addr = (void *)(unsigned long)
2519 				wr->sg_list[i].addr;
2520 		sge_len = wr->sg_list[i].length;
2521 
2522 		if ((sge_len + wqe->inline_len) >
2523 		    BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2524 			ibdev_err(&rdev->ibdev,
2525 				  "Inline data size requested > supported value");
2526 			return -EINVAL;
2527 		}
2529 
2530 		memcpy(in_data, sge_addr, sge_len);
2531 		in_data += wr->sg_list[i].length;
2532 		wqe->inline_len += wr->sg_list[i].length;
2533 	}
2534 	return wqe->inline_len;
2535 }
2536 
2537 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2538 				   const struct ib_send_wr *wr,
2539 				   struct bnxt_qplib_swqe *wqe)
2540 {
2541 	int payload_sz = 0;
2542 
2543 	if (wr->send_flags & IB_SEND_INLINE)
2544 		payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2545 	else
2546 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2547 					       wqe->num_sge);
2548 
2549 	return payload_sz;
2550 }
2551 
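/* Workaround for a HW stall on UD, GSI and raw-ethertype QPs: once
 * BNXT_RE_UD_QP_HW_STALL WQEs have been posted, re-modify the QP to
 * RTS and reset the WQE count.
 */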
2552 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2553 {
2554 	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2555 	     qp->ib_qp.qp_type == IB_QPT_GSI ||
2556 	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2557 	     qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2558 		int qp_attr_mask;
2559 		struct ib_qp_attr qp_attr;
2560 
2561 		qp_attr_mask = IB_QP_STATE;
2562 		qp_attr.qp_state = IB_QPS_RTS;
2563 		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2564 		qp->qplib_qp.wqe_cnt = 0;
2565 	}
2566 }
2567 
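/* Post send WRs on the GSI shadow QP from kernel context. Every WQE
 * defaults to a plain SEND and failures are only logged; there is no
 * bad_wr reporting.
 */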
2568 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2569 				       struct bnxt_re_qp *qp,
2570 				       const struct ib_send_wr *wr)
2571 {
2572 	int rc = 0, payload_sz = 0;
2573 	unsigned long flags;
2574 
2575 	spin_lock_irqsave(&qp->sq_lock, flags);
2576 	while (wr) {
2577 		struct bnxt_qplib_swqe wqe = {};
2578 
2579 		/* Common */
2580 		wqe.num_sge = wr->num_sge;
2581 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2582 			ibdev_err(&rdev->ibdev,
2583 				  "Limit exceeded for Send SGEs");
2584 			rc = -EINVAL;
2585 			goto bad;
2586 		}
2587 
2588 		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2589 		if (payload_sz < 0) {
2590 			rc = -EINVAL;
2591 			goto bad;
2592 		}
2593 		wqe.wr_id = wr->wr_id;
2594 
2595 		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2596 
2597 		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2598 		if (!rc)
2599 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2600 bad:
2601 		if (rc) {
2602 			ibdev_err(&rdev->ibdev,
2603 				  "Post send failed opcode = %#x rc = %d",
2604 				  wr->opcode, rc);
2605 			break;
2606 		}
2607 		wr = wr->next;
2608 	}
2609 	bnxt_qplib_post_send_db(&qp->qplib_qp);
2610 	bnxt_ud_qp_hw_stall_workaround(qp);
2611 	spin_unlock_irqrestore(&qp->sq_lock, flags);
2612 	return rc;
2613 }
2614 
2615 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2616 		      const struct ib_send_wr **bad_wr)
2617 {
2618 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2619 	struct bnxt_qplib_swqe wqe;
2620 	int rc = 0, payload_sz = 0;
2621 	unsigned long flags;
2622 
2623 	spin_lock_irqsave(&qp->sq_lock, flags);
2624 	while (wr) {
2625 		/* House keeping */
2626 		memset(&wqe, 0, sizeof(wqe));
2627 
2628 		/* Common */
2629 		wqe.num_sge = wr->num_sge;
2630 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2631 			ibdev_err(&qp->rdev->ibdev,
2632 				  "Limit exceeded for Send SGEs");
2633 			rc = -EINVAL;
2634 			goto bad;
2635 		}
2636 
2637 		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2638 		if (payload_sz < 0) {
2639 			rc = -EINVAL;
2640 			goto bad;
2641 		}
2642 		wqe.wr_id = wr->wr_id;
2643 
2644 		switch (wr->opcode) {
2645 		case IB_WR_SEND:
2646 		case IB_WR_SEND_WITH_IMM:
2647 			if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2648 				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2649 							       payload_sz);
2650 				if (rc)
2651 					goto bad;
2652 				wqe.rawqp1.lflags |=
2653 					SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2654 			}
2655 			switch (wr->send_flags) {
2656 			case IB_SEND_IP_CSUM:
2657 				wqe.rawqp1.lflags |=
2658 					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2659 				break;
2660 			default:
2661 				break;
2662 			}
2663 			fallthrough;
2664 		case IB_WR_SEND_WITH_INV:
2665 			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2666 			break;
2667 		case IB_WR_RDMA_WRITE:
2668 		case IB_WR_RDMA_WRITE_WITH_IMM:
2669 		case IB_WR_RDMA_READ:
2670 			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2671 			break;
2672 		case IB_WR_ATOMIC_CMP_AND_SWP:
2673 		case IB_WR_ATOMIC_FETCH_AND_ADD:
2674 			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2675 			break;
2676 		case IB_WR_RDMA_READ_WITH_INV:
2677 			ibdev_err(&qp->rdev->ibdev,
2678 				  "RDMA Read with Invalidate is not supported");
2679 			rc = -EINVAL;
2680 			goto bad;
2681 		case IB_WR_LOCAL_INV:
2682 			rc = bnxt_re_build_inv_wqe(wr, &wqe);
2683 			break;
2684 		case IB_WR_REG_MR:
2685 			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2686 			break;
2687 		default:
2688 			/* Unsupported WRs */
2689 			ibdev_err(&qp->rdev->ibdev,
2690 				  "WR (%#x) is not supported", wr->opcode);
2691 			rc = -EINVAL;
2692 			goto bad;
2693 		}
2694 		if (!rc)
2695 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2696 bad:
2697 		if (rc) {
2698 			ibdev_err(&qp->rdev->ibdev,
2699 				  "post_send failed op:%#x qps = %#x rc = %d\n",
2700 				  wr->opcode, qp->qplib_qp.state, rc);
2701 			*bad_wr = wr;
2702 			break;
2703 		}
2704 		wr = wr->next;
2705 	}
2706 	bnxt_qplib_post_send_db(&qp->qplib_qp);
2707 	bnxt_ud_qp_hw_stall_workaround(qp);
2708 	spin_unlock_irqrestore(&qp->sq_lock, flags);
2709 
2710 	return rc;
2711 }
2712 
2713 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2714 				       struct bnxt_re_qp *qp,
2715 				       const struct ib_recv_wr *wr)
2716 {
2717 	struct bnxt_qplib_swqe wqe;
2718 	int rc = 0;
2719 
2720 	memset(&wqe, 0, sizeof(wqe));
2721 	while (wr) {
2722 		/* House keeping */
2723 		memset(&wqe, 0, sizeof(wqe));
2724 
2725 		/* Common */
2726 		wqe.num_sge = wr->num_sge;
2727 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2728 			ibdev_err(&rdev->ibdev,
2729 				  "Limit exceeded for Receive SGEs");
2730 			rc = -EINVAL;
2731 			break;
2732 		}
2733 		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2734 		wqe.wr_id = wr->wr_id;
2735 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2736 
2737 		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2738 		if (rc)
2739 			break;
2740 
2741 		wr = wr->next;
2742 	}
2743 	if (!rc)
2744 		bnxt_qplib_post_recv_db(&qp->qplib_qp);
2745 	return rc;
2746 }
2747 
2748 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2749 		      const struct ib_recv_wr **bad_wr)
2750 {
2751 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2752 	struct bnxt_qplib_swqe wqe;
2753 	int rc = 0, payload_sz = 0;
2754 	unsigned long flags;
2755 	u32 count = 0;
2756 
2757 	spin_lock_irqsave(&qp->rq_lock, flags);
2758 	while (wr) {
2759 		/* House keeping */
2760 		memset(&wqe, 0, sizeof(wqe));
2761 
2762 		/* Common */
2763 		wqe.num_sge = wr->num_sge;
2764 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2765 			ibdev_err(&qp->rdev->ibdev,
2766 				  "Limit exceeded for Receive SGEs");
2767 			rc = -EINVAL;
2768 			*bad_wr = wr;
2769 			break;
2770 		}
2771 
2772 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2773 					       wr->num_sge);
2774 		wqe.wr_id = wr->wr_id;
2775 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2776 
2777 		if (ib_qp->qp_type == IB_QPT_GSI &&
2778 		    qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
2779 			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2780 							      payload_sz);
2781 		if (!rc)
2782 			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2783 		if (rc) {
2784 			*bad_wr = wr;
2785 			break;
2786 		}
2787 
2788 		/* Ring DB if the RQEs posted reaches a threshold value */
2789 		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2790 			bnxt_qplib_post_recv_db(&qp->qplib_qp);
2791 			count = 0;
2792 		}
2793 
2794 		wr = wr->next;
2795 	}
2796 
2797 	if (count)
2798 		bnxt_qplib_post_recv_db(&qp->qplib_qp);
2799 
2800 	spin_unlock_irqrestore(&qp->rq_lock, flags);
2801 
2802 	return rc;
2803 }
2804 
2805 /* Completion Queues */
2806 int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
2807 {
2808 	struct bnxt_re_cq *cq;
2809 	struct bnxt_qplib_nq *nq;
2810 	struct bnxt_re_dev *rdev;
2811 
2812 	cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2813 	rdev = cq->rdev;
2814 	nq = cq->qplib_cq.nq;
2815 
2816 	bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2817 	ib_umem_release(cq->umem);
2818 
2819 	atomic_dec(&rdev->cq_count);
2820 	nq->budget--;
2821 	kfree(cq->cql);
2822 	return 0;
2823 }
2824 
2825 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
2826 		      struct ib_udata *udata)
2827 {
2828 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
2829 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2830 	struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
2831 	int rc, entries;
2832 	int cqe = attr->cqe;
2833 	struct bnxt_qplib_nq *nq = NULL;
2834 	unsigned int nq_alloc_cnt;
2835 
2836 	if (attr->flags)
2837 		return -EOPNOTSUPP;
2838 
2839 	/* Validate CQ fields */
2840 	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2841 		ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded");
2842 		return -EINVAL;
2843 	}
2844 
2845 	cq->rdev = rdev;
2846 	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2847 
2848 	entries = roundup_pow_of_two(cqe + 1);
2849 	if (entries > dev_attr->max_cq_wqes + 1)
2850 		entries = dev_attr->max_cq_wqes + 1;
2851 
2852 	cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
2853 	cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
2854 	if (udata) {
2855 		struct bnxt_re_cq_req req;
2856 		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
2857 			udata, struct bnxt_re_ucontext, ib_uctx);
2858 		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2859 			rc = -EFAULT;
2860 			goto fail;
2861 		}
2862 
2863 		cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
2864 				       entries * sizeof(struct cq_base),
2865 				       IB_ACCESS_LOCAL_WRITE);
2866 		if (IS_ERR(cq->umem)) {
2867 			rc = PTR_ERR(cq->umem);
2868 			goto fail;
2869 		}
2870 		cq->qplib_cq.sg_info.umem = cq->umem;
2871 		cq->qplib_cq.dpi = &uctx->dpi;
2872 	} else {
2873 		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2874 		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2875 				  GFP_KERNEL);
2876 		if (!cq->cql) {
2877 			rc = -ENOMEM;
2878 			goto fail;
2879 		}
2880 
2881 		cq->qplib_cq.dpi = &rdev->dpi_privileged;
2882 	}
2883 	/*
2884 	 * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
2885 	 * used to derive the NQ index.
2886 	 */
2887 	nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2888 	nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2889 	cq->qplib_cq.max_wqe = entries;
2890 	cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2891 	cq->qplib_cq.nq	= nq;
2892 
2893 	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2894 	if (rc) {
2895 		ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
2896 		goto fail;
2897 	}
2898 
2899 	cq->ib_cq.cqe = entries;
2900 	cq->cq_period = cq->qplib_cq.period;
2901 	nq->budget++;
2902 
2903 	atomic_inc(&rdev->cq_count);
2904 	spin_lock_init(&cq->cq_lock);
2905 
2906 	if (udata) {
2907 		struct bnxt_re_cq_resp resp;
2908 
2909 		resp.cqid = cq->qplib_cq.id;
2910 		resp.tail = cq->qplib_cq.hwq.cons;
2911 		resp.phase = cq->qplib_cq.period;
2912 		resp.rsvd = 0;
2913 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2914 		if (rc) {
2915 			ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
2916 			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2917 			goto c2fail;
2918 		}
2919 	}
2920 
2921 	return 0;
2922 
2923 c2fail:
2924 	ib_umem_release(cq->umem);
2925 fail:
2926 	kfree(cq->cql);
2927 	return rc;
2928 }
2929 
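/* The following helpers map CQE status codes reported by the HW for
 * requester, raw QP1 and responder completions onto IB work-completion
 * status values.
 */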
2930 static u8 __req_to_ib_wc_status(u8 qstatus)
2931 {
2932 	switch (qstatus) {
2933 	case CQ_REQ_STATUS_OK:
2934 		return IB_WC_SUCCESS;
2935 	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2936 		return IB_WC_BAD_RESP_ERR;
2937 	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2938 		return IB_WC_LOC_LEN_ERR;
2939 	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2940 		return IB_WC_LOC_QP_OP_ERR;
2941 	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2942 		return IB_WC_LOC_PROT_ERR;
2943 	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2944 		return IB_WC_GENERAL_ERR;
2945 	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2946 		return IB_WC_REM_INV_REQ_ERR;
2947 	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2948 		return IB_WC_REM_ACCESS_ERR;
2949 	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2950 		return IB_WC_REM_OP_ERR;
2951 	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2952 		return IB_WC_RNR_RETRY_EXC_ERR;
2953 	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2954 		return IB_WC_RETRY_EXC_ERR;
2955 	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2956 		return IB_WC_WR_FLUSH_ERR;
2957 	default:
2958 		return IB_WC_GENERAL_ERR;
2959 	}
2961 }
2962 
2963 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2964 {
2965 	switch (qstatus) {
2966 	case CQ_RES_RAWETH_QP1_STATUS_OK:
2967 		return IB_WC_SUCCESS;
2968 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2969 		return IB_WC_LOC_ACCESS_ERR;
2970 	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2971 		return IB_WC_LOC_LEN_ERR;
2972 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2973 		return IB_WC_LOC_PROT_ERR;
2974 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2975 		return IB_WC_LOC_QP_OP_ERR;
2976 	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2977 		return IB_WC_GENERAL_ERR;
2978 	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2979 		return IB_WC_WR_FLUSH_ERR;
2980 	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2981 		return IB_WC_WR_FLUSH_ERR;
2982 	default:
2983 		return IB_WC_GENERAL_ERR;
2984 	}
2985 }
2986 
2987 static u8 __rc_to_ib_wc_status(u8 qstatus)
2988 {
2989 	switch (qstatus) {
2990 	case CQ_RES_RC_STATUS_OK:
2991 		return IB_WC_SUCCESS;
2992 	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2993 		return IB_WC_LOC_ACCESS_ERR;
2994 	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2995 		return IB_WC_LOC_LEN_ERR;
2996 	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2997 		return IB_WC_LOC_PROT_ERR;
2998 	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2999 		return IB_WC_LOC_QP_OP_ERR;
3000 	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
3001 		return IB_WC_GENERAL_ERR;
3002 	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
3003 		return IB_WC_REM_INV_REQ_ERR;
3004 	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
3005 		return IB_WC_WR_FLUSH_ERR;
3006 	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
3007 		return IB_WC_WR_FLUSH_ERR;
3008 	default:
3009 		return IB_WC_GENERAL_ERR;
3010 	}
3011 }
3012 
3013 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
3014 {
3015 	switch (cqe->type) {
3016 	case BNXT_QPLIB_SWQE_TYPE_SEND:
3017 		wc->opcode = IB_WC_SEND;
3018 		break;
3019 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
3020 		wc->opcode = IB_WC_SEND;
3021 		wc->wc_flags |= IB_WC_WITH_IMM;
3022 		break;
3023 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
3024 		wc->opcode = IB_WC_SEND;
3025 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3026 		break;
3027 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
3028 		wc->opcode = IB_WC_RDMA_WRITE;
3029 		break;
3030 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
3031 		wc->opcode = IB_WC_RDMA_WRITE;
3032 		wc->wc_flags |= IB_WC_WITH_IMM;
3033 		break;
3034 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
3035 		wc->opcode = IB_WC_RDMA_READ;
3036 		break;
3037 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
3038 		wc->opcode = IB_WC_COMP_SWAP;
3039 		break;
3040 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
3041 		wc->opcode = IB_WC_FETCH_ADD;
3042 		break;
3043 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
3044 		wc->opcode = IB_WC_LOCAL_INV;
3045 		break;
3046 	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
3047 		wc->opcode = IB_WC_REG_MR;
3048 		break;
3049 	default:
3050 		wc->opcode = IB_WC_SEND;
3051 		break;
3052 	}
3053 
3054 	wc->status = __req_to_ib_wc_status(cqe->status);
3055 }
3056 
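/* Classify a raw QP1 completion as RoCE v1, RoCE v2/IPv4 or
 * RoCE v2/IPv6 from the itype and checksum flags in the CQE, or
 * return -1 if it is not a RoCE packet.
 */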
3057 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
3058 				     u16 raweth_qp1_flags2)
3059 {
3060 	bool is_ipv6 = false, is_ipv4 = false;
3061 
3062 	/* raweth_qp1_flags Bit 9-6 indicates itype */
3063 	if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3064 	    != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3065 		return -1;
3066 
3067 	if (raweth_qp1_flags2 &
3068 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
3069 	    raweth_qp1_flags2 &
3070 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
3071 		/* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
3072 		(raweth_qp1_flags2 &
3073 		 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
3074 			(is_ipv6 = true) : (is_ipv4 = true);
3075 		return ((is_ipv6) ?
3076 			 BNXT_RE_ROCEV2_IPV6_PACKET :
3077 			 BNXT_RE_ROCEV2_IPV4_PACKET);
3078 	} else {
3079 		return BNXT_RE_ROCE_V1_PACKET;
3080 	}
3081 }
3082 
3083 static int bnxt_re_to_ib_nw_type(int nw_type)
3084 {
3085 	u8 nw_hdr_type = 0xFF;
3086 
3087 	switch (nw_type) {
3088 	case BNXT_RE_ROCE_V1_PACKET:
3089 		nw_hdr_type = RDMA_NETWORK_ROCE_V1;
3090 		break;
3091 	case BNXT_RE_ROCEV2_IPV4_PACKET:
3092 		nw_hdr_type = RDMA_NETWORK_IPV4;
3093 		break;
3094 	case BNXT_RE_ROCEV2_IPV6_PACKET:
3095 		nw_hdr_type = RDMA_NETWORK_IPV6;
3096 		break;
3097 	}
3098 	return nw_hdr_type;
3099 }
3100 
3101 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
3102 				       void *rq_hdr_buf)
3103 {
3104 	u8 *tmp_buf = NULL;
3105 	struct ethhdr *eth_hdr;
3106 	u16 eth_type;
3107 	bool rc = false;
3108 
3109 	tmp_buf = (u8 *)rq_hdr_buf;
3110 	/*
3111 	 * If the destination MAC is not the same as the interface MAC,
3112 	 * this could be a loopback or a multicast address; check
3113 	 * whether it is a loopback packet.
3114 	 */
3115 	if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
3116 		tmp_buf += 4;
3117 		/* Check the ether type */
3118 		eth_hdr = (struct ethhdr *)tmp_buf;
3119 		eth_type = ntohs(eth_hdr->h_proto);
3120 		switch (eth_type) {
3121 		case ETH_P_IBOE:
3122 			rc = true;
3123 			break;
3124 		case ETH_P_IP:
3125 		case ETH_P_IPV6: {
3126 			u32 len;
3127 			struct udphdr *udp_hdr;
3128 
3129 			len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
3130 						      sizeof(struct ipv6hdr));
3131 			tmp_buf += sizeof(struct ethhdr) + len;
3132 			udp_hdr = (struct udphdr *)tmp_buf;
3133 			if (ntohs(udp_hdr->dest) ==
3134 				    ROCE_V2_UDP_DPORT)
3135 				rc = true;
3136 			break;
3137 			}
3138 		default:
3139 			break;
3140 		}
3141 	}
3142 
3143 	return rc;
3144 }
3145 
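/* A raw QP1 packet received on the GSI QP carries the full Ethernet
 * frame. Loop it back through the shadow QP: post a receive on the
 * shadow QP that lands the GRH and MAD payload in the ULP's original
 * buffer, then send the frame (minus the L2 header) to the shadow QP
 * so the ULP finally sees only GRH + MAD, as described above.
 */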
3146 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
3147 					 struct bnxt_qplib_cqe *cqe)
3148 {
3149 	struct bnxt_re_dev *rdev = gsi_qp->rdev;
3150 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3151 	struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
3152 	struct bnxt_re_ah *gsi_sah;
3153 	struct ib_send_wr *swr;
3154 	struct ib_ud_wr udwr;
3155 	struct ib_recv_wr rwr;
3156 	int pkt_type = 0;
3157 	u32 tbl_idx;
3158 	void *rq_hdr_buf;
3159 	dma_addr_t rq_hdr_buf_map;
3160 	dma_addr_t shrq_hdr_buf_map;
3161 	u32 offset = 0;
3162 	u32 skip_bytes = 0;
3163 	struct ib_sge s_sge[2];
3164 	struct ib_sge r_sge[2];
3165 	int rc;
3166 
3167 	memset(&udwr, 0, sizeof(udwr));
3168 	memset(&rwr, 0, sizeof(rwr));
3169 	memset(&s_sge, 0, sizeof(s_sge));
3170 	memset(&r_sge, 0, sizeof(r_sge));
3171 
3172 	swr = &udwr.wr;
3173 	tbl_idx = cqe->wr_id;
3174 
3175 	rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
3176 			(tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
3177 	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3178 							  tbl_idx);
3179 
3180 	/* Shadow QP header buffer */
3181 	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_sqp->qplib_qp,
3182 							     tbl_idx);
3183 	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3184 
3185 	/* Store this cqe */
3186 	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
3187 	sqp_entry->qp1_qp = gsi_qp;
3188 
3189 	/* Find packet type from the cqe */
3190 
3191 	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
3192 					     cqe->raweth_qp1_flags2);
3193 	if (pkt_type < 0) {
3194 		ibdev_err(&rdev->ibdev, "Invalid packet\n");
3195 		return -EINVAL;
3196 	}
3197 
3198 	/* Adjust the offset for the user buffer and post in the rq */
3199 
3200 	if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
3201 		offset = 20;
3202 
3203 	/*
3204 	 * QP1 loopback packet has 4 bytes of internal header before
3205 	 * ether header. Skip these four bytes.
3206 	 */
3207 	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3208 		skip_bytes = 4;
3209 
3210 	/* First send SGE. Skip the ether header */
3211 	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3212 			+ skip_bytes;
3213 	s_sge[0].lkey = 0xFFFFFFFF;
3214 	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3215 				BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3216 
3217 	/* Second Send SGE */
3218 	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3219 			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3220 	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3221 		s_sge[1].addr += 8;
3222 	s_sge[1].lkey = 0xFFFFFFFF;
3223 	s_sge[1].length = 256;
3224 
3225 	/* First recv SGE */
3226 
3227 	r_sge[0].addr = shrq_hdr_buf_map;
3228 	r_sge[0].lkey = 0xFFFFFFFF;
3229 	r_sge[0].length = 40;
3230 
3231 	r_sge[1].addr = sqp_entry->sge.addr + offset;
3232 	r_sge[1].lkey = sqp_entry->sge.lkey;
3233 	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3234 
3235 	/* Create receive work request */
3236 	rwr.num_sge = 2;
3237 	rwr.sg_list = r_sge;
3238 	rwr.wr_id = tbl_idx;
3239 	rwr.next = NULL;
3240 
3241 	rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
3242 	if (rc) {
3243 		ibdev_err(&rdev->ibdev,
3244 			  "Failed to post Rx buffers to shadow QP");
3245 		return -ENOMEM;
3246 	}
3247 
3248 	swr->num_sge = 2;
3249 	swr->sg_list = s_sge;
3250 	swr->wr_id = tbl_idx;
3251 	swr->opcode = IB_WR_SEND;
3252 	swr->next = NULL;
3253 	gsi_sah = rdev->gsi_ctx.gsi_sah;
3254 	udwr.ah = &gsi_sah->ib_ah;
3255 	udwr.remote_qpn = gsi_sqp->qplib_qp.id;
3256 	udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
3257 
3258 	/* Post the received data on the send queue */
3259 	rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
3260 
3261 	return 0;
3262 }
3263 
3264 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3265 					  struct bnxt_qplib_cqe *cqe)
3266 {
3267 	wc->opcode = IB_WC_RECV;
3268 	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3269 	wc->wc_flags |= IB_WC_GRH;
3270 }
3271 
3272 static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
3273 					u16 vlan_id)
3274 {
3275 	/*
3276 	 * Check if the VLAN is configured in the host.  If not configured,
3277 	 * it can be a transparent VLAN, so don't report the VLAN id.
3278 	 */
3279 	if (!__vlan_find_dev_deep_rcu(rdev->netdev,
3280 				      htons(ETH_P_8021Q), vlan_id))
3281 		return false;
3282 	return true;
3283 }
3284 
3285 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3286 				u16 *vid, u8 *sl)
3287 {
3288 	bool ret = false;
3289 	u32 metadata;
3290 	u16 tpid;
3291 
3292 	metadata = orig_cqe->raweth_qp1_metadata;
3293 	if (orig_cqe->raweth_qp1_flags2 &
3294 		CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3295 		tpid = ((metadata &
3296 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3297 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3298 		if (tpid == ETH_P_8021Q) {
3299 			*vid = metadata &
3300 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3301 			*sl = (metadata &
3302 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3303 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3304 			ret = true;
3305 		}
3306 	}
3307 
3308 	return ret;
3309 }
3310 
3311 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3312 				      struct bnxt_qplib_cqe *cqe)
3313 {
3314 	wc->opcode = IB_WC_RECV;
3315 	wc->status = __rc_to_ib_wc_status(cqe->status);
3316 
3317 	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3318 		wc->wc_flags |= IB_WC_WITH_IMM;
3319 	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3320 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3321 	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3322 	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3323 		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3324 }
3325 
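/* Complete a receive on the shadow GSI QP using the original QP1 CQE
 * stashed in the sqp_tbl entry: restore the ULP's wr_id and report
 * the VLAN and network header type when available.
 */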
3326 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
3327 					     struct ib_wc *wc,
3328 					     struct bnxt_qplib_cqe *cqe)
3329 {
3330 	struct bnxt_re_dev *rdev = gsi_sqp->rdev;
3331 	struct bnxt_re_qp *gsi_qp = NULL;
3332 	struct bnxt_qplib_cqe *orig_cqe = NULL;
3333 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3334 	int nw_type;
3335 	u32 tbl_idx;
3336 	u16 vlan_id;
3337 	u8 sl;
3338 
3339 	tbl_idx = cqe->wr_id;
3340 
3341 	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3342 	gsi_qp = sqp_entry->qp1_qp;
3343 	orig_cqe = &sqp_entry->cqe;
3344 
3345 	wc->wr_id = sqp_entry->wrid;
3346 	wc->byte_len = orig_cqe->length;
3347 	wc->qp = &gsi_qp->ib_qp;
3348 
3349 	wc->ex.imm_data = orig_cqe->immdata;
3350 	wc->src_qp = orig_cqe->src_qp;
3351 	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3352 	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3353 		if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3354 			wc->vlan_id = vlan_id;
3355 			wc->sl = sl;
3356 			wc->wc_flags |= IB_WC_WITH_VLAN;
3357 		}
3358 	}
3359 	wc->port_num = 1;
3360 	wc->vendor_err = orig_cqe->status;
3361 
3362 	wc->opcode = IB_WC_RECV;
3363 	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3364 	wc->wc_flags |= IB_WC_GRH;
3365 
3366 	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3367 					    orig_cqe->raweth_qp1_flags2);
3368 	if (nw_type >= 0) {
3369 		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3370 		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3371 	}
3372 }
3373 
3374 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3375 				      struct ib_wc *wc,
3376 				      struct bnxt_qplib_cqe *cqe)
3377 {
3378 	u8 nw_type;
3379 
3380 	wc->opcode = IB_WC_RECV;
3381 	wc->status = __rc_to_ib_wc_status(cqe->status);
3382 
3383 	if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3384 		wc->wc_flags |= IB_WC_WITH_IMM;
3385 	/* report only on GSI QP for Thor */
3386 	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3387 		wc->wc_flags |= IB_WC_GRH;
3388 		memcpy(wc->smac, cqe->smac, ETH_ALEN);
3389 		wc->wc_flags |= IB_WC_WITH_SMAC;
3390 		if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3391 			wc->vlan_id = (cqe->cfa_meta & 0xFFF);
3392 			if (wc->vlan_id < 0x1000)
3393 				wc->wc_flags |= IB_WC_WITH_VLAN;
3394 		}
3395 		nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3396 			   CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3397 		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3398 		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3399 	}
3400 
3401 }
3402 
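/* Post the fence-MW bind as a phantom WQE on the SQ. The poll path
 * calls this when the qplib layer flags sq->send_phantom on a CQE.
 */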
3403 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3404 {
3405 	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3406 	unsigned long flags;
3407 	int rc = 0;
3408 
3409 	spin_lock_irqsave(&qp->sq_lock, flags);
3410 
3411 	rc = bnxt_re_bind_fence_mw(lib_qp);
3412 	if (!rc) {
3413 		lib_qp->sq.phantom_wqe_cnt++;
3414 		ibdev_dbg(&qp->rdev->ibdev,
3415 			  "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3416 			  lib_qp->id, lib_qp->sq.hwq.prod,
3417 			  HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3418 			  lib_qp->sq.phantom_wqe_cnt);
3419 	}
3420 
3421 	spin_unlock_irqrestore(&qp->sq_lock, flags);
3422 	return rc;
3423 }
3424 
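/* Poll up to num_entries completions into the caller's wc array.
 * CQEs are staged in the per-CQ CQL buffer, flush-list entries are
 * appended while budget remains, and raw QP1 / shadow GSI completions
 * are diverted to their special handlers before being transcribed
 * into ib_wc entries.
 */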
3425 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3426 {
3427 	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3428 	struct bnxt_re_qp *qp, *sh_qp;
3429 	struct bnxt_qplib_cqe *cqe;
3430 	int i, ncqe, budget;
3431 	struct bnxt_qplib_q *sq;
3432 	struct bnxt_qplib_qp *lib_qp;
3433 	u32 tbl_idx;
3434 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3435 	unsigned long flags;
3436 
3437 	spin_lock_irqsave(&cq->cq_lock, flags);
3438 	budget = min_t(u32, num_entries, cq->max_cql);
3439 	num_entries = budget;
3440 	if (!cq->cql) {
3441 		ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
3442 		goto exit;
3443 	}
3444 	cqe = &cq->cql[0];
3445 	while (budget) {
3446 		lib_qp = NULL;
3447 		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3448 		if (lib_qp) {
3449 			sq = &lib_qp->sq;
3450 			if (sq->send_phantom) {
3451 				qp = container_of(lib_qp,
3452 						  struct bnxt_re_qp, qplib_qp);
3453 				if (send_phantom_wqe(qp) == -ENOMEM)
3454 					ibdev_err(&cq->rdev->ibdev,
3455 						  "Phantom failed! Scheduled to send again\n");
3456 				else
3457 					sq->send_phantom = false;
3458 			}
3459 		}
3460 		if (ncqe < budget)
3461 			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3462 							      cqe + ncqe,
3463 							      budget - ncqe);
3464 
3465 		if (!ncqe)
3466 			break;
3467 
3468 		for (i = 0; i < ncqe; i++, cqe++) {
3469 			/* Transcribe each qplib_cqe back to an ib_wc */
3470 			memset(wc, 0, sizeof(*wc));
3471 
3472 			wc->wr_id = cqe->wr_id;
3473 			wc->byte_len = cqe->length;
3474 			qp = container_of
3475 				((struct bnxt_qplib_qp *)
3476 				 (unsigned long)(cqe->qp_handle),
3477 				 struct bnxt_re_qp, qplib_qp);
3478 			if (!qp) {
3479 				ibdev_err(&cq->rdev->ibdev, "POLL CQ : bad QP handle");
3480 				continue;
3481 			}
3482 			wc->qp = &qp->ib_qp;
3483 			wc->ex.imm_data = cqe->immdata;
3484 			wc->src_qp = cqe->src_qp;
3485 			memcpy(wc->smac, cqe->smac, ETH_ALEN);
3486 			wc->port_num = 1;
3487 			wc->vendor_err = cqe->status;
3488 
3489 			switch (cqe->opcode) {
3490 			case CQ_BASE_CQE_TYPE_REQ:
3491 				sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3492 				if (sh_qp &&
3493 				    qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3494 					/* Handle this completion with
3495 					 * the stored completion
3496 					 */
3497 					memset(wc, 0, sizeof(*wc));
3498 					continue;
3499 				}
3500 				bnxt_re_process_req_wc(wc, cqe);
3501 				break;
3502 			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3503 				if (!cqe->status) {
3504 					int rc = 0;
3505 
3506 					rc = bnxt_re_process_raw_qp_pkt_rx
3507 								(qp, cqe);
3508 					if (!rc) {
3509 						memset(wc, 0, sizeof(*wc));
3510 						continue;
3511 					}
3512 					cqe->status = -1;
3513 				}
				/* Errors are not looped back through the
				 * shadow QP, but the wr_id must be replaced
				 * with the one saved in the SQP table.
				 */
3518 				tbl_idx = cqe->wr_id;
3519 				sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
3520 				wc->wr_id = sqp_entry->wrid;
3521 				bnxt_re_process_res_rawqp1_wc(wc, cqe);
3522 				break;
3523 			case CQ_BASE_CQE_TYPE_RES_RC:
3524 				bnxt_re_process_res_rc_wc(wc, cqe);
3525 				break;
3526 			case CQ_BASE_CQE_TYPE_RES_UD:
3527 				sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3528 				if (sh_qp &&
3529 				    qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3530 					/* Handle this completion with
3531 					 * the stored completion
3532 					 */
3533 					if (cqe->status) {
3534 						continue;
3535 					} else {
3536 						bnxt_re_process_res_shadow_qp_wc
3537 								(qp, wc, cqe);
3538 						break;
3539 					}
3540 				}
3541 				bnxt_re_process_res_ud_wc(qp, wc, cqe);
3542 				break;
3543 			default:
3544 				ibdev_err(&cq->rdev->ibdev,
3545 					  "POLL CQ : type 0x%x not handled",
3546 					  cqe->opcode);
3547 				continue;
3548 			}
3549 			wc++;
3550 			budget--;
3551 		}
3552 	}
3553 exit:
3554 	spin_unlock_irqrestore(&cq->cq_lock, flags);
3555 	return num_entries - budget;
3556 }
3557 
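/* Arm the CQ for notification.  IB_CQ_NEXT_COMP arms for any completion
 * (ARMALL) and IB_CQ_SOLICITED arms only for solicited completions
 * (ARMSE).  When IB_CQ_REPORT_MISSED_EVENTS is set and the CQ is not
 * empty, 1 is returned without ringing the doorbell so the caller can
 * poll again first.  A typical consumer loop looks roughly like this
 * (illustrative sketch only; handle() stands in for the consumer's
 * completion handler):
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */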
3558 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3559 			  enum ib_cq_notify_flags ib_cqn_flags)
3560 {
3561 	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3562 	int type = 0, rc = 0;
3563 	unsigned long flags;
3564 
3565 	spin_lock_irqsave(&cq->cq_lock, flags);
3566 	/* Trigger on the very next completion */
3567 	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3568 		type = DBC_DBC_TYPE_CQ_ARMALL;
3569 	/* Trigger on the next solicited completion */
3570 	else if (ib_cqn_flags & IB_CQ_SOLICITED)
3571 		type = DBC_DBC_TYPE_CQ_ARMSE;
3572 
3573 	/* Poll to see if there are missed events */
3574 	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3575 	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3576 		rc = 1;
3577 		goto exit;
3578 	}
3579 	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3580 
3581 exit:
3582 	spin_unlock_irqrestore(&cq->cq_lock, flags);
3583 	return rc;
3584 }
3585 
3586 /* Memory Regions */
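/* Allocate a DMA memory region covering the whole address space
 * (registered at address 0 with an "infinite" length), typically used by
 * kernel ULPs that need an lkey for local DMA.  The rkey is only valid
 * when remote access flags were requested.
 */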
3587 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3588 {
3589 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3590 	struct bnxt_re_dev *rdev = pd->rdev;
3591 	struct bnxt_re_mr *mr;
3592 	u64 pbl = 0;
3593 	int rc;
3594 
3595 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3596 	if (!mr)
3597 		return ERR_PTR(-ENOMEM);
3598 
3599 	mr->rdev = rdev;
3600 	mr->qplib_mr.pd = &pd->qplib_pd;
3601 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3602 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3603 
3604 	/* Allocate and register 0 as the address */
3605 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3606 	if (rc)
3607 		goto fail;
3608 
3609 	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
	mr->qplib_mr.total_size = -1; /* Infinite length */
3611 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
3612 			       PAGE_SIZE);
3613 	if (rc)
3614 		goto fail_mr;
3615 
3616 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
3617 	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3618 			       IB_ACCESS_REMOTE_ATOMIC))
3619 		mr->ib_mr.rkey = mr->ib_mr.lkey;
3620 	atomic_inc(&rdev->mr_count);
3621 
3622 	return &mr->ib_mr;
3623 
3624 fail_mr:
3625 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3626 fail:
3627 	kfree(mr);
3628 	return ERR_PTR(rc);
3629 }
3630 
3631 int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3632 {
3633 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3634 	struct bnxt_re_dev *rdev = mr->rdev;
3635 	int rc;
3636 
3637 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3638 	if (rc) {
3639 		ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
3640 		return rc;
3641 	}
3642 
3643 	if (mr->pages) {
3644 		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3645 							&mr->qplib_frpl);
3646 		kfree(mr->pages);
3647 		mr->npages = 0;
3648 		mr->pages = NULL;
3649 	}
3650 	ib_umem_release(mr->ib_umem);
3651 
3652 	kfree(mr);
3653 	atomic_dec(&rdev->mr_count);
3654 	return rc;
3655 }
3656 
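/* Callback used by ib_sg_to_pages() to record one page address of a
 * fast-registration MR into the driver's page list.  Fails with -ENOMEM
 * once the pre-allocated HW fast-reg page list is full.
 */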
3657 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3658 {
3659 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3660 
3661 	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3662 		return -ENOMEM;
3663 
3664 	mr->pages[mr->npages++] = addr;
3665 	return 0;
3666 }
3667 
3668 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3669 		      unsigned int *sg_offset)
3670 {
3671 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3672 
3673 	mr->npages = 0;
3674 	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3675 }
3676 
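/* Allocate a fast-registration MR (IB_MR_TYPE_MEM_REG) backed by a HW
 * fast-reg page list of up to max_num_sg entries.  A ULP would then map
 * an SG list and register it with a work request, along these lines
 * (illustrative sketch only, not driver code; reg_wr is a struct
 * ib_reg_wr owned by the ULP):
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr = mr;
 *	reg_wr.key = mr->rkey;
 *	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	ib_post_send(qp, &reg_wr.wr, NULL);
 */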
3677 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3678 			       u32 max_num_sg)
3679 {
3680 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3681 	struct bnxt_re_dev *rdev = pd->rdev;
3682 	struct bnxt_re_mr *mr = NULL;
3683 	int rc;
3684 
3685 	if (type != IB_MR_TYPE_MEM_REG) {
3686 		ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
3687 		return ERR_PTR(-EINVAL);
3688 	}
3689 	if (max_num_sg > MAX_PBL_LVL_1_PGS)
3690 		return ERR_PTR(-EINVAL);
3691 
3692 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3693 	if (!mr)
3694 		return ERR_PTR(-ENOMEM);
3695 
3696 	mr->rdev = rdev;
3697 	mr->qplib_mr.pd = &pd->qplib_pd;
3698 	mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3699 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3700 
3701 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3702 	if (rc)
3703 		goto bail;
3704 
3705 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
3706 	mr->ib_mr.rkey = mr->ib_mr.lkey;
3707 
3708 	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3709 	if (!mr->pages) {
3710 		rc = -ENOMEM;
3711 		goto fail;
3712 	}
3713 	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3714 						 &mr->qplib_frpl, max_num_sg);
3715 	if (rc) {
3716 		ibdev_err(&rdev->ibdev,
3717 			  "Failed to allocate HW FR page list");
3718 		goto fail_mr;
3719 	}
3720 
3721 	atomic_inc(&rdev->mr_count);
3722 	return &mr->ib_mr;
3723 
3724 fail_mr:
3725 	kfree(mr->pages);
3726 fail:
3727 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3728 bail:
3729 	kfree(mr);
3730 	return ERR_PTR(rc);
3731 }
3732 
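/* Allocate a memory window of the requested type.  IB type 1 windows map
 * to the HW MW_TYPE1 MRW and type 2 windows to MW_TYPE2B; only the rkey
 * is returned to the caller.
 */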
3733 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3734 			       struct ib_udata *udata)
3735 {
3736 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3737 	struct bnxt_re_dev *rdev = pd->rdev;
3738 	struct bnxt_re_mw *mw;
3739 	int rc;
3740 
3741 	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3742 	if (!mw)
3743 		return ERR_PTR(-ENOMEM);
3744 	mw->rdev = rdev;
3745 	mw->qplib_mw.pd = &pd->qplib_pd;
3746 
3747 	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3748 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3749 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3750 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3751 	if (rc) {
3752 		ibdev_err(&rdev->ibdev, "Allocate MW failed!");
3753 		goto fail;
3754 	}
3755 	mw->ib_mw.rkey = mw->qplib_mw.rkey;
3756 
3757 	atomic_inc(&rdev->mw_count);
3758 	return &mw->ib_mw;
3759 
3760 fail:
3761 	kfree(mw);
3762 	return ERR_PTR(rc);
3763 }
3764 
3765 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3766 {
3767 	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3768 	struct bnxt_re_dev *rdev = mw->rdev;
3769 	int rc;
3770 
3771 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3772 	if (rc) {
3773 		ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
3774 		return rc;
3775 	}
3776 
3777 	kfree(mw);
3778 	atomic_dec(&rdev->mw_count);
3779 	return rc;
3780 }
3781 
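/* Walk the umem in page_shift sized DMA blocks and copy each block's DMA
 * address into the flat PBL table.  Returns the number of entries filled.
 */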
3782 static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
3783 			     int page_shift)
3784 {
3785 	u64 *pbl_tbl = pbl_tbl_orig;
3786 	u64 page_size =  BIT_ULL(page_shift);
3787 	struct ib_block_iter biter;
3788 
3789 	rdma_umem_for_each_dma_block(umem, &biter, page_size)
3790 		*pbl_tbl++ = rdma_block_iter_dma_address(&biter);
3791 
3792 	return pbl_tbl - pbl_tbl_orig;
3793 }
3794 
3795 /* uverbs */
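/* Register a user memory region: pin the user buffer with ib_umem_get(),
 * pick the best supported page size (4K or 2M here), build a flat PBL of
 * DMA block addresses and hand it to the firmware via bnxt_qplib_reg_mr().
 */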
3796 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3797 				  u64 virt_addr, int mr_access_flags,
3798 				  struct ib_udata *udata)
3799 {
3800 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3801 	struct bnxt_re_dev *rdev = pd->rdev;
3802 	struct bnxt_re_mr *mr;
3803 	struct ib_umem *umem;
3804 	u64 *pbl_tbl = NULL;
3805 	unsigned long page_size;
3806 	int umem_pgs, rc;
3807 
3808 	if (length > BNXT_RE_MAX_MR_SIZE) {
3809 		ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
3810 			  length, BNXT_RE_MAX_MR_SIZE);
3811 		return ERR_PTR(-ENOMEM);
3812 	}
3813 
3814 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3815 	if (!mr)
3816 		return ERR_PTR(-ENOMEM);
3817 
3818 	mr->rdev = rdev;
3819 	mr->qplib_mr.pd = &pd->qplib_pd;
3820 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3821 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3822 
3823 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3824 	if (rc) {
3825 		ibdev_err(&rdev->ibdev, "Failed to allocate MR");
3826 		goto free_mr;
3827 	}
3828 	/* The fixed portion of the rkey is the same as the lkey */
3829 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
3830 
3831 	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
3832 	if (IS_ERR(umem)) {
3833 		ibdev_err(&rdev->ibdev, "Failed to get umem");
3834 		rc = -EFAULT;
3835 		goto free_mrw;
3836 	}
3837 	mr->ib_umem = umem;
3838 
3839 	mr->qplib_mr.va = virt_addr;
3840 	page_size = ib_umem_find_best_pgsz(
3841 		umem, BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M, virt_addr);
3842 	if (!page_size) {
3843 		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
3844 		rc = -EFAULT;
3845 		goto free_umem;
3846 	}
3847 	mr->qplib_mr.total_size = length;
3848 
3849 	if (page_size == BNXT_RE_PAGE_SIZE_4K &&
3850 	    length > BNXT_RE_MAX_MR_SIZE_LOW) {
3851 		ibdev_err(&rdev->ibdev, "Requested MR Sz:%llu Max sup:%llu",
3852 			  length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
3853 		rc = -EINVAL;
3854 		goto free_umem;
3855 	}
3856 
3857 	umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
3858 	pbl_tbl = kcalloc(umem_pgs, sizeof(*pbl_tbl), GFP_KERNEL);
3859 	if (!pbl_tbl) {
3860 		rc = -ENOMEM;
3861 		goto free_umem;
3862 	}
3863 
3864 	/* Map umem buf ptrs to the PBL */
3865 	umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, order_base_2(page_size));
3866 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
3867 			       umem_pgs, false, page_size);
3868 	if (rc) {
3869 		ibdev_err(&rdev->ibdev, "Failed to register user MR");
3870 		goto fail;
3871 	}
3872 
3873 	kfree(pbl_tbl);
3874 
3875 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
3876 	mr->ib_mr.rkey = mr->qplib_mr.lkey;
3877 	atomic_inc(&rdev->mr_count);
3878 
3879 	return &mr->ib_mr;
3880 fail:
3881 	kfree(pbl_tbl);
3882 free_umem:
3883 	ib_umem_release(umem);
3884 free_mrw:
3885 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3886 free_mr:
3887 	kfree(mr);
3888 	return ERR_PTR(rc);
3889 }
3890 
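/* Allocate a per-process user context.  A shared page is set aside for
 * the user library (exposed later through bnxt_re_mmap() at pgoff 0) and
 * the response carries device parameters plus the chip identification,
 * packed as chip_num | chip_rev << REV_SFT | chip_metal << MET_SFT.
 * The requesting library's uverbs ABI version must match
 * BNXT_RE_ABI_VERSION.
 */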
3891 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
3892 {
3893 	struct ib_device *ibdev = ctx->device;
3894 	struct bnxt_re_ucontext *uctx =
3895 		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
3896 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3897 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3898 	struct bnxt_re_uctx_resp resp;
3899 	u32 chip_met_rev_num = 0;
3900 	int rc;
3901 
3902 	ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
3903 
3904 	if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
		ibdev_dbg(ibdev, "Requested ABI version differs from the device ABI %d",
			  BNXT_RE_ABI_VERSION);
3907 		return -EPERM;
3908 	}
3909 
3910 	uctx->rdev = rdev;
3911 
3912 	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3913 	if (!uctx->shpg) {
3914 		rc = -ENOMEM;
3915 		goto fail;
3916 	}
3917 	spin_lock_init(&uctx->sh_lock);
3918 
3919 	resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
3920 	chip_met_rev_num = rdev->chip_ctx->chip_num;
3921 	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
3922 			     BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
3923 	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
3924 			     BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
3925 	resp.chip_id0 = chip_met_rev_num;
3926 	/* Future extension of chip info */
3927 	resp.chip_id1 = 0;
	/* Temporary: use xa_alloc instead */
3929 	resp.dev_id = rdev->en_dev->pdev->devfn;
3930 	resp.max_qp = rdev->qplib_ctx.qpc_count;
3931 	resp.pg_size = PAGE_SIZE;
3932 	resp.cqe_sz = sizeof(struct cq_base);
3933 	resp.max_cqd = dev_attr->max_cq_wqes;
3934 	resp.rsvd    = 0;
3935 
3936 	rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
3937 	if (rc) {
3938 		ibdev_err(ibdev, "Failed to copy user context");
3939 		rc = -EFAULT;
3940 		goto cfail;
3941 	}
3942 
3943 	return 0;
3944 cfail:
3945 	free_page((unsigned long)uctx->shpg);
3946 	uctx->shpg = NULL;
3947 fail:
3948 	return rc;
3949 }
3950 
3951 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3952 {
3953 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3954 						   struct bnxt_re_ucontext,
3955 						   ib_uctx);
3956 
3957 	struct bnxt_re_dev *rdev = uctx->rdev;
3958 
3959 	if (uctx->shpg)
3960 		free_page((unsigned long)uctx->shpg);
3961 
3962 	if (uctx->dpi.dbr) {
		/* Free the DPI that was set up when the application allocated
		 * its first PD, and mark the context DPI as freed.
		 */
3966 		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3967 				       &rdev->qplib_res.dpi_tbl, &uctx->dpi);
3968 		uctx->dpi.dbr = NULL;
3969 	}
3970 }
3971 
/* Map driver memory into a user application's address space: a non-zero
 * vm_pgoff maps the doorbell (DPI) page as uncached I/O memory, while a
 * zero offset maps the per-context shared page.
 */
3973 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3974 {
3975 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3976 						   struct bnxt_re_ucontext,
3977 						   ib_uctx);
3978 	struct bnxt_re_dev *rdev = uctx->rdev;
3979 	u64 pfn;
3980 
3981 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3982 		return -EINVAL;
3983 
3984 	if (vma->vm_pgoff) {
3985 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3986 		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3987 				       PAGE_SIZE, vma->vm_page_prot)) {
3988 			ibdev_err(&rdev->ibdev, "Failed to map DPI");
3989 			return -EAGAIN;
3990 		}
3991 	} else {
3992 		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3993 		if (remap_pfn_range(vma, vma->vm_start,
3994 				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
3995 			ibdev_err(&rdev->ibdev, "Failed to map shared page");
3996 			return -EAGAIN;
3997 		}
3998 	}
3999 
4000 	return 0;
4001 }
4002