1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: IB Verbs interpreter
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
44 
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/ib_addr.h>
49 #include <rdma/ib_mad.h>
50 #include <rdma/ib_cache.h>
51 
52 #include "bnxt_ulp.h"
53 
54 #include "roce_hsi.h"
55 #include "qplib_res.h"
56 #include "qplib_sp.h"
57 #include "qplib_fp.h"
58 #include "qplib_rcfw.h"
59 
60 #include "bnxt_re.h"
61 #include "ib_verbs.h"
62 #include <rdma/bnxt_re-abi.h>
63 
64 static int __from_ib_access_flags(int iflags)
65 {
66 	int qflags = 0;
67 
68 	if (iflags & IB_ACCESS_LOCAL_WRITE)
69 		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
70 	if (iflags & IB_ACCESS_REMOTE_READ)
71 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
72 	if (iflags & IB_ACCESS_REMOTE_WRITE)
73 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
74 	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
75 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
76 	if (iflags & IB_ACCESS_MW_BIND)
77 		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
78 	if (iflags & IB_ZERO_BASED)
79 		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
80 	if (iflags & IB_ACCESS_ON_DEMAND)
81 		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
82 	return qflags;
83 };
84 
85 static enum ib_access_flags __to_ib_access_flags(int qflags)
86 {
87 	enum ib_access_flags iflags = 0;
88 
89 	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
90 		iflags |= IB_ACCESS_LOCAL_WRITE;
91 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
92 		iflags |= IB_ACCESS_REMOTE_WRITE;
93 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
94 		iflags |= IB_ACCESS_REMOTE_READ;
95 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
96 		iflags |= IB_ACCESS_REMOTE_ATOMIC;
97 	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
98 		iflags |= IB_ACCESS_MW_BIND;
99 	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
100 		iflags |= IB_ZERO_BASED;
101 	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
102 		iflags |= IB_ACCESS_ON_DEMAND;
103 	return iflags;
104 };
105 
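/* Copy an array of ib_sge entries into the qplib SGE format and return
 * the total payload length in bytes. The caller must size sg_list to
 * hold at least 'num' entries.
 */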
106 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
107 			     struct bnxt_qplib_sge *sg_list, int num)
108 {
109 	int i, total = 0;
110 
111 	for (i = 0; i < num; i++) {
112 		sg_list[i].addr = ib_sg_list[i].addr;
113 		sg_list[i].lkey = ib_sg_list[i].lkey;
114 		sg_list[i].size = ib_sg_list[i].length;
115 		total += sg_list[i].size;
116 	}
117 	return total;
118 }
119 
120 /* Device */
121 struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
122 {
123 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
124 	struct net_device *netdev = NULL;
125 
126 	rcu_read_lock();
127 	if (rdev)
128 		netdev = rdev->netdev;
129 	if (netdev)
130 		dev_hold(netdev);
131 
132 	rcu_read_unlock();
133 	return netdev;
134 }
135 
136 int bnxt_re_query_device(struct ib_device *ibdev,
137 			 struct ib_device_attr *ib_attr,
138 			 struct ib_udata *udata)
139 {
140 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
141 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
142 
143 	memset(ib_attr, 0, sizeof(*ib_attr));
144 	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
145 	       min(sizeof(dev_attr->fw_ver),
146 		   sizeof(ib_attr->fw_ver)));
147 	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
148 			    (u8 *)&ib_attr->sys_image_guid);
149 	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
150 	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
151 
152 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
153 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
154 	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
155 	ib_attr->max_qp = dev_attr->max_qp;
156 	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
157 	ib_attr->device_cap_flags =
158 				    IB_DEVICE_CURR_QP_STATE_MOD
159 				    | IB_DEVICE_RC_RNR_NAK_GEN
160 				    | IB_DEVICE_SHUTDOWN_PORT
161 				    | IB_DEVICE_SYS_IMAGE_GUID
162 				    | IB_DEVICE_LOCAL_DMA_LKEY
163 				    | IB_DEVICE_RESIZE_MAX_WR
164 				    | IB_DEVICE_PORT_ACTIVE_EVENT
165 				    | IB_DEVICE_N_NOTIFY_CQ
166 				    | IB_DEVICE_MEM_WINDOW
167 				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
168 				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
169 	ib_attr->max_sge = dev_attr->max_qp_sges;
170 	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
171 	ib_attr->max_cq = dev_attr->max_cq;
172 	ib_attr->max_cqe = dev_attr->max_cq_wqes;
173 	ib_attr->max_mr = dev_attr->max_mr;
174 	ib_attr->max_pd = dev_attr->max_pd;
175 	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
176 	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
177 	ib_attr->atomic_cap = IB_ATOMIC_NONE;
178 	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
179 
180 	ib_attr->max_ee_rd_atom = 0;
181 	ib_attr->max_res_rd_atom = 0;
182 	ib_attr->max_ee_init_rd_atom = 0;
183 	ib_attr->max_ee = 0;
184 	ib_attr->max_rdd = 0;
185 	ib_attr->max_mw = dev_attr->max_mw;
186 	ib_attr->max_raw_ipv6_qp = 0;
187 	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
188 	ib_attr->max_mcast_grp = 0;
189 	ib_attr->max_mcast_qp_attach = 0;
190 	ib_attr->max_total_mcast_qp_attach = 0;
191 	ib_attr->max_ah = dev_attr->max_ah;
192 
193 	ib_attr->max_fmr = 0;
194 	ib_attr->max_map_per_fmr = 0;
195 
196 	ib_attr->max_srq = dev_attr->max_srq;
197 	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
198 	ib_attr->max_srq_sge = dev_attr->max_srq_sges;
199 
200 	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
201 
202 	ib_attr->max_pkeys = 1;
203 	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
204 	return 0;
205 }
206 
207 int bnxt_re_modify_device(struct ib_device *ibdev,
208 			  int device_modify_mask,
209 			  struct ib_device_modify *device_modify)
210 {
211 	switch (device_modify_mask) {
212 	case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
213 		/* Modifying the GUID requires a matching update of the GID table */
214 		/* The GUID should therefore be treated as read-only */
215 		break;
216 	case IB_DEVICE_MODIFY_NODE_DESC:
217 		/* The Node Description should likewise be treated as read-only */
218 		break;
219 	default:
220 		break;
221 	}
222 	return 0;
223 }
224 
225 /* Port */
226 int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
227 		       struct ib_port_attr *port_attr)
228 {
229 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
230 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
231 
232 	memset(port_attr, 0, sizeof(*port_attr));
233 
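	/* Physical port state per the IB spec: 5 = LinkUp, 3 = Disabled */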
234 	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
235 		port_attr->state = IB_PORT_ACTIVE;
236 		port_attr->phys_state = 5;
237 	} else {
238 		port_attr->state = IB_PORT_DOWN;
239 		port_attr->phys_state = 3;
240 	}
241 	port_attr->max_mtu = IB_MTU_4096;
242 	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
243 	port_attr->gid_tbl_len = dev_attr->max_sgid;
244 	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
245 				    IB_PORT_DEVICE_MGMT_SUP |
246 				    IB_PORT_VENDOR_CLASS_SUP |
247 				    IB_PORT_IP_BASED_GIDS;
248 
249 	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
250 	port_attr->bad_pkey_cntr = 0;
251 	port_attr->qkey_viol_cntr = 0;
252 	port_attr->pkey_tbl_len = dev_attr->max_pkey;
253 	port_attr->lid = 0;
254 	port_attr->sm_lid = 0;
255 	port_attr->lmc = 0;
256 	port_attr->max_vl_num = 4;
257 	port_attr->sm_sl = 0;
258 	port_attr->subnet_timeout = 0;
259 	port_attr->init_type_reply = 0;
260 	port_attr->active_speed = rdev->active_speed;
261 	port_attr->active_width = rdev->active_width;
262 
263 	return 0;
264 }
265 
266 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
267 			       struct ib_port_immutable *immutable)
268 {
269 	struct ib_port_attr port_attr;
270 
271 	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
272 		return -EINVAL;
273 
274 	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
275 	immutable->gid_tbl_len = port_attr.gid_tbl_len;
276 	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
277 	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
278 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
279 	return 0;
280 }
281 
282 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
283 {
284 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
285 
286 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
287 		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
288 		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
289 }
290 
291 int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
292 		       u16 index, u16 *pkey)
293 {
294 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
295 
296 	/* Ignore port_num */
297 
298 	memset(pkey, 0, sizeof(*pkey));
299 	return bnxt_qplib_get_pkey(&rdev->qplib_res,
300 				   &rdev->qplib_res.pkey_tbl, index, pkey);
301 }
302 
303 int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
304 		      int index, union ib_gid *gid)
305 {
306 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
307 	int rc = 0;
308 
309 	/* Ignore port_num */
310 	memset(gid, 0, sizeof(*gid));
311 	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
312 				 &rdev->qplib_res.sgid_tbl, index,
313 				 (struct bnxt_qplib_gid *)gid);
314 	return rc;
315 }
316 
317 int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
318 		    unsigned int index, void **context)
319 {
320 	int rc = 0;
321 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
322 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
323 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
324 	struct bnxt_qplib_gid *gid_to_del;
325 
326 	/* Delete the entry from the hardware */
327 	ctx = *context;
328 	if (!ctx)
329 		return -EINVAL;
330 
331 	if (sgid_tbl && sgid_tbl->active) {
332 		if (ctx->idx >= sgid_tbl->max)
333 			return -EINVAL;
334 		gid_to_del = &sgid_tbl->tbl[ctx->idx];
335 		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
336 		 * or via the ib_unregister_device path. In the former case QP1
337 		 * may not be destroyed yet, in which case just return, as FW
338 		 * needs that entry to be present and will fail its deletion.
339 		 * We could get invoked again after QP1 is destroyed, OR get an
340 		 * ADD_GID call with a different GID value for the same index,
341 		 * where we issue a MODIFY_GID cmd to update the GID entry -- TBD
342 		 */
343 		if (ctx->idx == 0 &&
344 		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
345 		    ctx->refcnt == 1 && rdev->qp1_sqp) {
346 			dev_dbg(rdev_to_dev(rdev),
347 				"Trying to delete GID0 while QP1 is alive\n");
348 			return -EFAULT;
349 		}
350 		ctx->refcnt--;
351 		if (!ctx->refcnt) {
352 			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
353 			if (rc) {
354 				dev_err(rdev_to_dev(rdev),
355 					"Failed to remove GID: %#x", rc);
356 			} else {
357 				ctx_tbl = sgid_tbl->ctx;
358 				ctx_tbl[ctx->idx] = NULL;
359 				kfree(ctx);
360 			}
361 		}
362 	} else {
363 		return -EINVAL;
364 	}
365 	return rc;
366 }
367 
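/* The stack can add the same GID more than once (for example the RoCE
 * v1 and v2 flavours of an address map to a single HW entry), so each
 * GID index carries a driver-private context with a reference count:
 * the HW entry is created on the first add and deleted from HW only
 * when the last reference is dropped in bnxt_re_del_gid().
 */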
368 int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
369 		    unsigned int index, const union ib_gid *gid,
370 		    const struct ib_gid_attr *attr, void **context)
371 {
372 	int rc;
373 	u32 tbl_idx = 0;
374 	u16 vlan_id = 0xFFFF;
375 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
376 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
377 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
378 
379 	if (attr->ndev && is_vlan_dev(attr->ndev))
380 		vlan_id = vlan_dev_vlan_id(attr->ndev);
381 
382 	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
383 				 rdev->qplib_res.netdev->dev_addr,
384 				 vlan_id, true, &tbl_idx);
385 	if (rc == -EALREADY) {
386 		ctx_tbl = sgid_tbl->ctx;
387 		ctx_tbl[tbl_idx]->refcnt++;
388 		*context = ctx_tbl[tbl_idx];
389 		return 0;
390 	}
391 
392 	if (rc < 0) {
393 		dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
394 		return rc;
395 	}
396 
397 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
398 	if (!ctx)
399 		return -ENOMEM;
400 	ctx_tbl = sgid_tbl->ctx;
401 	ctx->idx = tbl_idx;
402 	ctx->refcnt = 1;
403 	ctx_tbl[tbl_idx] = ctx;
404 	*context = ctx;
405 
406 	return rc;
407 }
408 
409 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
410 					    u8 port_num)
411 {
412 	return IB_LINK_LAYER_ETHERNET;
413 }
414 
415 #define	BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
416 
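/* Fence emulation: the HW does not provide a native fence WQE, so for
 * kernel consumers each PD carries a small fence MR bound to a type-1
 * MW. bnxt_re_bind_fence_mw() posts a pre-built bind WQE with the
 * unconditional-fence flag set, which orders subsequent WQEs behind
 * the outstanding ones.
 */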
417 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
418 {
419 	struct bnxt_re_fence_data *fence = &pd->fence;
420 	struct ib_mr *ib_mr = &fence->mr->ib_mr;
421 	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
422 
423 	memset(wqe, 0, sizeof(*wqe));
424 	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
425 	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
426 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
427 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
428 	wqe->bind.zero_based = false;
429 	wqe->bind.parent_l_key = ib_mr->lkey;
430 	wqe->bind.va = (u64)(unsigned long)fence->va;
431 	wqe->bind.length = fence->size;
432 	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
433 	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
434 
435 	/* Save the initial rkey in fence structure for now;
436 	 * wqe->bind.r_key will be set at (re)bind time.
437 	 */
438 	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
439 }
440 
441 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
442 {
443 	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
444 					     qplib_qp);
445 	struct ib_pd *ib_pd = qp->ib_qp.pd;
446 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
447 	struct bnxt_re_fence_data *fence = &pd->fence;
448 	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
449 	struct bnxt_qplib_swqe wqe;
450 	int rc;
451 
452 	memcpy(&wqe, fence_wqe, sizeof(wqe));
453 	wqe.bind.r_key = fence->bind_rkey;
454 	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
455 
456 	dev_dbg(rdev_to_dev(qp->rdev),
457 		"Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
458 		wqe.bind.r_key, qp->qplib_qp.id, pd);
459 	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
460 	if (rc) {
461 		dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
462 		return rc;
463 	}
464 	bnxt_qplib_post_send_db(&qp->qplib_qp);
465 
466 	return rc;
467 }
468 
469 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
470 {
471 	struct bnxt_re_fence_data *fence = &pd->fence;
472 	struct bnxt_re_dev *rdev = pd->rdev;
473 	struct device *dev = &rdev->en_dev->pdev->dev;
474 	struct bnxt_re_mr *mr = fence->mr;
475 
476 	if (fence->mw) {
477 		bnxt_re_dealloc_mw(fence->mw);
478 		fence->mw = NULL;
479 	}
480 	if (mr) {
481 		if (mr->ib_mr.rkey)
482 			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
483 					     true);
484 		if (mr->ib_mr.lkey)
485 			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
486 		kfree(mr);
487 		fence->mr = NULL;
488 	}
489 	if (fence->dma_addr) {
490 		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
491 				 DMA_BIDIRECTIONAL);
492 		fence->dma_addr = 0;
493 	}
494 }
495 
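/* Set up the per-PD fence resources: DMA-map the fence buffer,
 * allocate and register a physical MR over it, create a type-1 MW and
 * pre-build the bind WQE used by bnxt_re_bind_fence_mw(). Any partial
 * setup is unwound via bnxt_re_destroy_fence_mr().
 */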
496 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
497 {
498 	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
499 	struct bnxt_re_fence_data *fence = &pd->fence;
500 	struct bnxt_re_dev *rdev = pd->rdev;
501 	struct device *dev = &rdev->en_dev->pdev->dev;
502 	struct bnxt_re_mr *mr = NULL;
503 	dma_addr_t dma_addr = 0;
504 	struct ib_mw *mw;
505 	u64 pbl_tbl;
506 	int rc;
507 
508 	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
509 				  DMA_BIDIRECTIONAL);
510 	rc = dma_mapping_error(dev, dma_addr);
511 	if (rc) {
512 		dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
513 		rc = -EIO;
514 		fence->dma_addr = 0;
515 		goto fail;
516 	}
517 	fence->dma_addr = dma_addr;
518 
519 	/* Allocate a MR */
520 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
521 	if (!mr) {
522 		rc = -ENOMEM;
523 		goto fail;
524 	}
525 	fence->mr = mr;
526 	mr->rdev = rdev;
527 	mr->qplib_mr.pd = &pd->qplib_pd;
528 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
529 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
530 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
531 	if (rc) {
532 		dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
533 		goto fail;
534 	}
535 
536 	/* Register MR */
537 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
538 	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
539 	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
540 	pbl_tbl = dma_addr;
541 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
542 			       BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
543 	if (rc) {
544 		dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
545 		goto fail;
546 	}
547 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
548 
549 	/* Create a fence MW only for kernel consumers */
550 	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
551 	if (IS_ERR(mw)) {
552 		dev_err(rdev_to_dev(rdev),
553 			"Failed to create fence-MW for PD: %p\n", pd);
554 		rc = PTR_ERR(mw);
555 		goto fail;
556 	}
557 	fence->mw = mw;
558 
559 	bnxt_re_create_fence_wqe(pd);
560 	return 0;
561 
562 fail:
563 	bnxt_re_destroy_fence_mr(pd);
564 	return rc;
565 }
566 
567 /* Protection Domains */
568 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
569 {
570 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
571 	struct bnxt_re_dev *rdev = pd->rdev;
572 	int rc;
573 
574 	bnxt_re_destroy_fence_mr(pd);
575 
576 	if (pd->qplib_pd.id) {
577 		rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
578 					   &rdev->qplib_res.pd_tbl,
579 					   &pd->qplib_pd);
580 		if (rc)
581 			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
582 	}
583 
584 	kfree(pd);
585 	return 0;
586 }
587 
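/* Allocate a PD. For user contexts a doorbell page (DPI) is also
 * allocated on first use and its details are returned through udata;
 * kernel PDs instead get the fence MR/MW used for fence emulation.
 */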
588 struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
589 			       struct ib_ucontext *ucontext,
590 			       struct ib_udata *udata)
591 {
592 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
593 	struct bnxt_re_ucontext *ucntx = container_of(ucontext,
594 						      struct bnxt_re_ucontext,
595 						      ib_uctx);
596 	struct bnxt_re_pd *pd;
597 	int rc;
598 
599 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
600 	if (!pd)
601 		return ERR_PTR(-ENOMEM);
602 
603 	pd->rdev = rdev;
604 	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
605 		dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
606 		rc = -ENOMEM;
607 		goto fail;
608 	}
609 
610 	if (udata) {
611 		struct bnxt_re_pd_resp resp;
612 
613 		if (!ucntx->dpi.dbr) {
614 			/* Allocate the DPI in alloc_pd so that ibv_devinfo
615 			 * and related applications do not fail when DPIs
616 			 * are depleted.
617 			 */
618 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
619 						 &ucntx->dpi, ucntx)) {
620 				rc = -ENOMEM;
621 				goto dbfail;
622 			}
623 		}
624 
625 		resp.pdid = pd->qplib_pd.id;
626 		/* Still allow mapping this DBR to the new user PD. */
627 		resp.dpi = ucntx->dpi.dpi;
628 		resp.dbr = (u64)ucntx->dpi.umdbr;
629 
630 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
631 		if (rc) {
632 			dev_err(rdev_to_dev(rdev),
633 				"Failed to copy user response\n");
634 			goto dbfail;
635 		}
636 	}
637 
638 	if (!udata)
639 		if (bnxt_re_create_fence_mr(pd))
640 			dev_warn(rdev_to_dev(rdev),
641 				 "Failed to create Fence-MR\n");
642 	return &pd->ib_pd;
643 dbfail:
644 	(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
645 				    &pd->qplib_pd);
646 fail:
647 	kfree(pd);
648 	return ERR_PTR(rc);
649 }
650 
651 /* Address Handles */
652 int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
653 {
654 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
655 	struct bnxt_re_dev *rdev = ah->rdev;
656 	int rc;
657 
658 	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
659 	if (rc) {
660 		dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
661 		return rc;
662 	}
663 	kfree(ah);
664 	return 0;
665 }
666 
667 struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
668 				struct rdma_ah_attr *ah_attr,
669 				struct ib_udata *udata)
670 {
671 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
672 	struct bnxt_re_dev *rdev = pd->rdev;
673 	struct bnxt_re_ah *ah;
674 	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
675 	int rc;
676 	u8 nw_type;
677 
678 	struct ib_gid_attr sgid_attr;
679 
680 	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
681 		dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
682 		return ERR_PTR(-EINVAL);
683 	}
684 	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
685 	if (!ah)
686 		return ERR_PTR(-ENOMEM);
687 
688 	ah->rdev = rdev;
689 	ah->qplib_ah.pd = &pd->qplib_pd;
690 
691 	/* Supply the configuration for the HW */
692 	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
693 	       sizeof(union ib_gid));
694 	/*
695 	 * If RoCE V2 is enabled, the stack will have two entries for
696 	 * each GID entry. Avoid this duplicate entry in HW by dividing
697 	 * the GID index by 2 for RoCE V2.
698 	 */
699 	ah->qplib_ah.sgid_index = grh->sgid_index / 2;
700 	ah->qplib_ah.host_sgid_index = grh->sgid_index;
701 	ah->qplib_ah.traffic_class = grh->traffic_class;
702 	ah->qplib_ah.flow_label = grh->flow_label;
703 	ah->qplib_ah.hop_limit = grh->hop_limit;
704 	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
705 	if (ib_pd->uobject &&
706 	    !rdma_is_multicast_addr((struct in6_addr *)
707 				    grh->dgid.raw) &&
708 	    !rdma_link_local_addr((struct in6_addr *)
709 				  grh->dgid.raw)) {
710 		union ib_gid sgid;
711 
712 		rc = ib_get_cached_gid(&rdev->ibdev, 1,
713 				       grh->sgid_index, &sgid,
714 				       &sgid_attr);
715 		if (rc) {
716 			dev_err(rdev_to_dev(rdev),
717 				"Failed to query gid at index %d",
718 				grh->sgid_index);
719 			goto fail;
720 		}
721 		if (sgid_attr.ndev)
722 			dev_put(sgid_attr.ndev);
723 		/* Get network header type for this GID */
724 		nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
725 		switch (nw_type) {
726 		case RDMA_NETWORK_IPV4:
727 			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
728 			break;
729 		case RDMA_NETWORK_IPV6:
730 			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
731 			break;
732 		default:
733 			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
734 			break;
735 		}
736 	}
737 
738 	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
739 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
740 	if (rc) {
741 		dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
742 		goto fail;
743 	}
744 
745 	/* Write the AVID to the shared page so userspace can read it. */
746 	if (ib_pd->uobject) {
747 		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
748 		struct bnxt_re_ucontext *uctx;
749 		unsigned long flag;
750 		u32 *wrptr;
751 
752 		uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
753 		spin_lock_irqsave(&uctx->sh_lock, flag);
754 		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
755 		*wrptr = ah->qplib_ah.id;
756 		wmb(); /* make sure the AVID write reaches the shared page */
757 		spin_unlock_irqrestore(&uctx->sh_lock, flag);
758 	}
759 
760 	return &ah->ib_ah;
761 
762 fail:
763 	kfree(ah);
764 	return ERR_PTR(rc);
765 }
766 
767 int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
768 {
769 	return 0;
770 }
771 
772 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
773 {
774 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
775 
776 	ah_attr->type = ib_ah->type;
777 	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
778 	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
779 	rdma_ah_set_grh(ah_attr, NULL, 0,
780 			ah->qplib_ah.host_sgid_index,
781 			0, ah->qplib_ah.traffic_class);
782 	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
783 	rdma_ah_set_port_num(ah_attr, 1);
784 	rdma_ah_set_static_rate(ah_attr, 0);
785 	return 0;
786 }
787 
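/* Lock both of a QP's CQs in a fixed order (SCQ first) so that
 * flush-list manipulation is atomic with respect to completion
 * processing. When the QP uses a single CQ for send and receive only
 * one lock is taken; the __acquire()/__release() annotations keep
 * sparse's lock-balance checking happy.
 */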
788 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
789 	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
790 {
791 	unsigned long flags;
792 
793 	spin_lock_irqsave(&qp->scq->cq_lock, flags);
794 	if (qp->rcq != qp->scq)
795 		spin_lock(&qp->rcq->cq_lock);
796 	else
797 		__acquire(&qp->rcq->cq_lock);
798 
799 	return flags;
800 }
801 
802 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
803 			unsigned long flags)
804 	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
805 {
806 	if (qp->rcq != qp->scq)
807 		spin_unlock(&qp->rcq->cq_lock);
808 	else
809 		__release(&qp->rcq->cq_lock);
810 	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
811 }
812 
813 /* Queue Pairs */
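/* Destroy a QP: tear down the HW QP first, then clean out pending
 * completions under the CQ locks before releasing the backing
 * resources. For the GSI QP the companion shadow QP and its AH are
 * torn down as well.
 */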
814 int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
815 {
816 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
817 	struct bnxt_re_dev *rdev = qp->rdev;
818 	int rc;
819 	unsigned int flags;
820 
821 	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
822 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
823 	if (rc) {
824 		dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
825 		return rc;
826 	}
827 
828 	flags = bnxt_re_lock_cqs(qp);
829 	bnxt_qplib_clean_qp(&qp->qplib_qp);
830 	bnxt_re_unlock_cqs(qp, flags);
831 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
832 
833 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
834 		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
835 					   &rdev->sqp_ah->qplib_ah);
836 		if (rc) {
837 			dev_err(rdev_to_dev(rdev),
838 				"Failed to destroy HW AH for shadow QP");
839 			return rc;
840 		}
841 
842 		bnxt_qplib_clean_qp(&qp->qplib_qp);
843 		rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
844 					   &rdev->qp1_sqp->qplib_qp);
845 		if (rc) {
846 			dev_err(rdev_to_dev(rdev),
847 				"Failed to destroy Shadow QP");
848 			return rc;
849 		}
850 		mutex_lock(&rdev->qp_lock);
851 		list_del(&rdev->qp1_sqp->list);
852 		atomic_dec(&rdev->qp_count);
853 		mutex_unlock(&rdev->qp_lock);
854 
855 		kfree(rdev->sqp_ah);
856 		kfree(rdev->qp1_sqp);
857 		rdev->qp1_sqp = NULL;
858 		rdev->sqp_ah = NULL;
859 	}
860 
861 	if (!IS_ERR_OR_NULL(qp->rumem))
862 		ib_umem_release(qp->rumem);
863 	if (!IS_ERR_OR_NULL(qp->sumem))
864 		ib_umem_release(qp->sumem);
865 
866 	mutex_lock(&rdev->qp_lock);
867 	list_del(&qp->list);
868 	atomic_dec(&rdev->qp_count);
869 	mutex_unlock(&rdev->qp_lock);
870 	kfree(qp);
871 	return 0;
872 }
873 
874 static u8 __from_ib_qp_type(enum ib_qp_type type)
875 {
876 	switch (type) {
877 	case IB_QPT_GSI:
878 		return CMDQ_CREATE_QP1_TYPE_GSI;
879 	case IB_QPT_RC:
880 		return CMDQ_CREATE_QP_TYPE_RC;
881 	case IB_QPT_UD:
882 		return CMDQ_CREATE_QP_TYPE_UD;
883 	default:
884 		return IB_QPT_MAX;
885 	}
886 }
887 
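/* Pin and map the userspace SQ/RQ buffers described by the ABI
 * request. For RC QPs the SQ buffer also covers the PSN search area
 * that follows the WQEs.
 */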
888 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
889 				struct bnxt_re_qp *qp, struct ib_udata *udata)
890 {
891 	struct bnxt_re_qp_req ureq;
892 	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
893 	struct ib_umem *umem;
894 	int bytes = 0;
895 	struct ib_ucontext *context = pd->ib_pd.uobject->context;
896 	struct bnxt_re_ucontext *cntx = container_of(context,
897 						     struct bnxt_re_ucontext,
898 						     ib_uctx);
899 	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
900 		return -EFAULT;
901 
902 	bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
903 	/* PSN search memory is required only for RC QPs. */
904 	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
905 		bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
906 	bytes = PAGE_ALIGN(bytes);
907 	umem = ib_umem_get(context, ureq.qpsva, bytes,
908 			   IB_ACCESS_LOCAL_WRITE, 1);
909 	if (IS_ERR(umem))
910 		return PTR_ERR(umem);
911 
912 	qp->sumem = umem;
913 	qplib_qp->sq.sglist = umem->sg_head.sgl;
914 	qplib_qp->sq.nmap = umem->nmap;
915 	qplib_qp->qp_handle = ureq.qp_handle;
916 
917 	if (!qp->qplib_qp.srq) {
918 		bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
919 		bytes = PAGE_ALIGN(bytes);
920 		umem = ib_umem_get(context, ureq.qprva, bytes,
921 				   IB_ACCESS_LOCAL_WRITE, 1);
922 		if (IS_ERR(umem))
923 			goto rqfail;
924 		qp->rumem = umem;
925 		qplib_qp->rq.sglist = umem->sg_head.sgl;
926 		qplib_qp->rq.nmap = umem->nmap;
927 	}
928 
929 	qplib_qp->dpi = &cntx->dpi;
930 	return 0;
931 rqfail:
932 	ib_umem_release(qp->sumem);
933 	qp->sumem = NULL;
934 	qplib_qp->sq.sglist = NULL;
935 	qplib_qp->sq.nmap = 0;
936 
937 	return PTR_ERR(umem);
938 }
939 
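/* Build the AH used by the shadow GSI QP. The shadow QP only relays
 * traffic back through the local port, so the DGID is a copy of our
 * own SGID (index 0) and the DMAC is the port's own MAC address.
 */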
940 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
941 				(struct bnxt_re_pd *pd,
942 				 struct bnxt_qplib_res *qp1_res,
943 				 struct bnxt_qplib_qp *qp1_qp)
944 {
945 	struct bnxt_re_dev *rdev = pd->rdev;
946 	struct bnxt_re_ah *ah;
947 	union ib_gid sgid;
948 	int rc;
949 
950 	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
951 	if (!ah)
952 		return NULL;
953 
954 	ah->rdev = rdev;
955 	ah->qplib_ah.pd = &pd->qplib_pd;
956 
957 	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
958 	if (rc)
959 		goto fail;
960 
961 	/* Supply the same data for dgid as for sgid */
962 	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
963 	       sizeof(union ib_gid));
964 	ah->qplib_ah.sgid_index = 0;
965 
966 	ah->qplib_ah.traffic_class = 0;
967 	ah->qplib_ah.flow_label = 0;
968 	ah->qplib_ah.hop_limit = 1;
969 	ah->qplib_ah.sl = 0;
970 	/* Use the SMAC as the DMAC */
971 	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
972 
973 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
974 	if (rc) {
975 		dev_err(rdev_to_dev(rdev),
976 			"Failed to allocate HW AH for Shadow QP");
977 		goto fail;
978 	}
979 
980 	return ah;
981 
982 fail:
983 	kfree(ah);
984 	return NULL;
985 }
986 
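/* Create the shadow UD QP that services QP1 traffic. It shares QP1's
 * CQs and mirrors the QP1 RQ depth on both of its queues, and its RQ
 * carries a header buffer large enough for an IPv6 GRH so received
 * QP1 packets can be examined in software.
 */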
987 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
988 				(struct bnxt_re_pd *pd,
989 				 struct bnxt_qplib_res *qp1_res,
990 				 struct bnxt_qplib_qp *qp1_qp)
991 {
992 	struct bnxt_re_dev *rdev = pd->rdev;
993 	struct bnxt_re_qp *qp;
994 	int rc;
995 
996 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
997 	if (!qp)
998 		return NULL;
999 
1000 	qp->rdev = rdev;
1001 
1002 	/* Initialize the shadow QP structure from the QP1 values */
1003 	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1004 
1005 	qp->qplib_qp.pd = &pd->qplib_pd;
1006 	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1007 	qp->qplib_qp.type = IB_QPT_UD;
1008 
1009 	qp->qplib_qp.max_inline_data = 0;
1010 	qp->qplib_qp.sig_type = true;
1011 
1012 	/* The shadow QP SQ depth should be the same as the QP1 RQ depth */
1013 	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1014 	qp->qplib_qp.sq.max_sge = 2;
1015 	/* Q full delta can be 1 since it is internal QP */
1016 	qp->qplib_qp.sq.q_full_delta = 1;
1017 
1018 	qp->qplib_qp.scq = qp1_qp->scq;
1019 	qp->qplib_qp.rcq = qp1_qp->rcq;
1020 
1021 	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1022 	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1023 	/* Q full delta can be 1 since it is internal QP */
1024 	qp->qplib_qp.rq.q_full_delta = 1;
1025 
1026 	qp->qplib_qp.mtu = qp1_qp->mtu;
1027 
1028 	qp->qplib_qp.sq_hdr_buf_size = 0;
1029 	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1030 	qp->qplib_qp.dpi = &rdev->dpi_privileged;
1031 
1032 	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1033 	if (rc)
1034 		goto fail;
1035 
1036 	rdev->sqp_id = qp->qplib_qp.id;
1037 
1038 	spin_lock_init(&qp->sq_lock);
1039 	INIT_LIST_HEAD(&qp->list);
1040 	mutex_lock(&rdev->qp_lock);
1041 	list_add_tail(&qp->list, &rdev->qp_list);
1042 	atomic_inc(&rdev->qp_count);
1043 	mutex_unlock(&rdev->qp_lock);
1044 	return qp;
1045 fail:
1046 	kfree(qp);
1047 	return NULL;
1048 }
1049 
1050 struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1051 				struct ib_qp_init_attr *qp_init_attr,
1052 				struct ib_udata *udata)
1053 {
1054 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1055 	struct bnxt_re_dev *rdev = pd->rdev;
1056 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1057 	struct bnxt_re_qp *qp;
1058 	struct bnxt_re_cq *cq;
1059 	struct bnxt_re_srq *srq;
1060 	int rc, entries;
1061 
1062 	if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1063 	    (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1064 	    (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1065 	    (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1066 	    (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1067 		return ERR_PTR(-EINVAL);
1068 
1069 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1070 	if (!qp)
1071 		return ERR_PTR(-ENOMEM);
1072 
1073 	qp->rdev = rdev;
1074 	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1075 	qp->qplib_qp.pd = &pd->qplib_pd;
1076 	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1077 	qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1078 	if (qp->qplib_qp.type == IB_QPT_MAX) {
1079 		dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1080 			qp->qplib_qp.type);
1081 		rc = -EINVAL;
1082 		goto fail;
1083 	}
1084 	qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1085 	qp->qplib_qp.sig_type = (qp_init_attr->sq_sig_type ==
1086 				 IB_SIGNAL_ALL_WR);
1087 
1088 	qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1089 	if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1090 		qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1091 
1092 	if (qp_init_attr->send_cq) {
1093 		cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1094 				  ib_cq);
1095 		if (!cq) {
1096 			dev_err(rdev_to_dev(rdev), "Send CQ not found");
1097 			rc = -EINVAL;
1098 			goto fail;
1099 		}
1100 		qp->qplib_qp.scq = &cq->qplib_cq;
1101 		qp->scq = cq;
1102 	}
1103 
1104 	if (qp_init_attr->recv_cq) {
1105 		cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1106 				  ib_cq);
1107 		if (!cq) {
1108 			dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1109 			rc = -EINVAL;
1110 			goto fail;
1111 		}
1112 		qp->qplib_qp.rcq = &cq->qplib_cq;
1113 		qp->rcq = cq;
1114 	}
1115 
1116 	if (qp_init_attr->srq) {
1117 		srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
1118 				   ib_srq);
1119 		if (!srq) {
1120 			dev_err(rdev_to_dev(rdev), "SRQ not found");
1121 			rc = -EINVAL;
1122 			goto fail;
1123 		}
1124 		qp->qplib_qp.srq = &srq->qplib_srq;
1125 		qp->qplib_qp.rq.max_wqe = 0;
1126 	} else {
1127 		/* Allocate 1 more than what's provided so posting max doesn't
1128 		 * mean empty
1129 		 */
1130 		entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1131 		qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1132 						dev_attr->max_qp_wqes + 1);
1133 
1134 		qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1135 						qp_init_attr->cap.max_recv_wr;
1136 
1137 		qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1138 		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1139 			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1140 	}
1141 
1142 	qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1143 
1144 	if (qp_init_attr->qp_type == IB_QPT_GSI) {
1145 		/* Allocate 1 more than what's provided */
1146 		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1147 		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1148 						dev_attr->max_qp_wqes + 1);
1149 		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1150 						qp_init_attr->cap.max_send_wr;
1151 		qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1152 		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1153 			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1154 		qp->qplib_qp.sq.max_sge++;
1155 		if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1156 			qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1157 
1158 		qp->qplib_qp.rq_hdr_buf_size =
1159 					BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1160 
1161 		qp->qplib_qp.sq_hdr_buf_size =
1162 					BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1163 		qp->qplib_qp.dpi = &rdev->dpi_privileged;
1164 		rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1165 		if (rc) {
1166 			dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1167 			goto fail;
1168 		}
1169 		/* Create a shadow QP to handle the QP1 traffic */
1170 		rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1171 							 &qp->qplib_qp);
1172 		if (!rdev->qp1_sqp) {
1173 			rc = -EINVAL;
1174 			dev_err(rdev_to_dev(rdev),
1175 				"Failed to create Shadow QP for QP1");
1176 			goto qp_destroy;
1177 		}
1178 		rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1179 							   &qp->qplib_qp);
1180 		if (!rdev->sqp_ah) {
1181 			bnxt_qplib_destroy_qp(&rdev->qplib_res,
1182 					      &rdev->qp1_sqp->qplib_qp);
1183 			rc = -EINVAL;
1184 			dev_err(rdev_to_dev(rdev),
1185 				"Failed to create AH entry for ShadowQP");
1186 			goto qp_destroy;
1187 		}
1188 
1189 	} else {
1190 		/* Allocate BNXT_QPLIB_RESERVED_QP_WRS + 1 more than what's provided */
1191 		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1192 					     BNXT_QPLIB_RESERVED_QP_WRS + 1);
1193 		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1194 						dev_attr->max_qp_wqes +
1195 						BNXT_QPLIB_RESERVED_QP_WRS + 1);
1196 		qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1197 
1198 		/*
1199 		 * Reserve one slot for the phantom WQE. The application can
1200 		 * then post one extra entry, but allow this to avoid an
1201 		 * unexpected queue-full condition.
1202 		 */
1203 
1204 		qp->qplib_qp.sq.q_full_delta -= 1;
1205 
1206 		qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1207 		qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1208 		if (udata) {
1209 			rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1210 			if (rc)
1211 				goto fail;
1212 		} else {
1213 			qp->qplib_qp.dpi = &rdev->dpi_privileged;
1214 		}
1215 
1216 		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1217 		if (rc) {
1218 			dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1219 			goto free_umem;
1220 		}
1221 	}
1222 
1223 	qp->ib_qp.qp_num = qp->qplib_qp.id;
1224 	spin_lock_init(&qp->sq_lock);
1225 	spin_lock_init(&qp->rq_lock);
1226 
1227 	if (udata) {
1228 		struct bnxt_re_qp_resp resp;
1229 
1230 		resp.qpid = qp->ib_qp.qp_num;
1231 		resp.rsvd = 0;
1232 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1233 		if (rc) {
1234 			dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1235 			goto qp_destroy;
1236 		}
1237 	}
1238 	INIT_LIST_HEAD(&qp->list);
1239 	mutex_lock(&rdev->qp_lock);
1240 	list_add_tail(&qp->list, &rdev->qp_list);
1241 	atomic_inc(&rdev->qp_count);
1242 	mutex_unlock(&rdev->qp_lock);
1243 
1244 	return &qp->ib_qp;
1245 qp_destroy:
1246 	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1247 free_umem:
1248 	if (udata) {
1249 		if (qp->rumem)
1250 			ib_umem_release(qp->rumem);
1251 		if (qp->sumem)
1252 			ib_umem_release(qp->sumem);
1253 	}
1254 fail:
1255 	kfree(qp);
1256 	return ERR_PTR(rc);
1257 }
1258 
1259 static u8 __from_ib_qp_state(enum ib_qp_state state)
1260 {
1261 	switch (state) {
1262 	case IB_QPS_RESET:
1263 		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1264 	case IB_QPS_INIT:
1265 		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1266 	case IB_QPS_RTR:
1267 		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1268 	case IB_QPS_RTS:
1269 		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1270 	case IB_QPS_SQD:
1271 		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1272 	case IB_QPS_SQE:
1273 		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1274 	case IB_QPS_ERR:
1275 	default:
1276 		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1277 	}
1278 }
1279 
1280 static enum ib_qp_state __to_ib_qp_state(u8 state)
1281 {
1282 	switch (state) {
1283 	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1284 		return IB_QPS_RESET;
1285 	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1286 		return IB_QPS_INIT;
1287 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1288 		return IB_QPS_RTR;
1289 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1290 		return IB_QPS_RTS;
1291 	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1292 		return IB_QPS_SQD;
1293 	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1294 		return IB_QPS_SQE;
1295 	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1296 	default:
1297 		return IB_QPS_ERR;
1298 	}
1299 }
1300 
1301 static u32 __from_ib_mtu(enum ib_mtu mtu)
1302 {
1303 	switch (mtu) {
1304 	case IB_MTU_256:
1305 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1306 	case IB_MTU_512:
1307 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1308 	case IB_MTU_1024:
1309 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1310 	case IB_MTU_2048:
1311 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1312 	case IB_MTU_4096:
1313 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1314 	default:
1315 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1316 	}
1317 }
1318 
1319 static enum ib_mtu __to_ib_mtu(u32 mtu)
1320 {
1321 	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1322 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1323 		return IB_MTU_256;
1324 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1325 		return IB_MTU_512;
1326 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1327 		return IB_MTU_1024;
1328 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1329 		return IB_MTU_2048;
1330 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1331 		return IB_MTU_4096;
1332 	default:
1333 		return IB_MTU_2048;
1334 	}
1335 }
1336 
1337 /* Shared Receive Queues */
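/* Each SRQ is serviced by a notification queue (NQ); the NQ budget is
 * incremented when an SRQ is created and decremented on destroy so
 * the poll budget tracks the number of attached queues.
 */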
1338 int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
1339 {
1340 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1341 					       ib_srq);
1342 	struct bnxt_re_dev *rdev = srq->rdev;
1343 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1344 	struct bnxt_qplib_nq *nq = NULL;
1345 	int rc;
1346 
1347 	if (qplib_srq->cq)
1348 		nq = qplib_srq->cq->nq;
1349 	rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1350 	if (rc) {
1351 		dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
1352 		return rc;
1353 	}
1354 
1355 	if (srq->umem)
1356 		ib_umem_release(srq->umem);
1357 	kfree(srq);
1358 	atomic_dec(&rdev->srq_count);
1359 	if (nq)
1360 		nq->budget--;
1361 	return 0;
1362 }
1363 
1364 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1365 				 struct bnxt_re_pd *pd,
1366 				 struct bnxt_re_srq *srq,
1367 				 struct ib_udata *udata)
1368 {
1369 	struct bnxt_re_srq_req ureq;
1370 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1371 	struct ib_umem *umem;
1372 	int bytes = 0;
1373 	struct ib_ucontext *context = pd->ib_pd.uobject->context;
1374 	struct bnxt_re_ucontext *cntx = container_of(context,
1375 						     struct bnxt_re_ucontext,
1376 						     ib_uctx);
1377 	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1378 		return -EFAULT;
1379 
1380 	bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1381 	bytes = PAGE_ALIGN(bytes);
1382 	umem = ib_umem_get(context, ureq.srqva, bytes,
1383 			   IB_ACCESS_LOCAL_WRITE, 1);
1384 	if (IS_ERR(umem))
1385 		return PTR_ERR(umem);
1386 
1387 	srq->umem = umem;
1388 	qplib_srq->nmap = umem->nmap;
1389 	qplib_srq->sglist = umem->sg_head.sgl;
1390 	qplib_srq->srq_handle = ureq.srq_handle;
1391 	qplib_srq->dpi = &cntx->dpi;
1392 
1393 	return 0;
1394 }
1395 
1396 struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
1397 				  struct ib_srq_init_attr *srq_init_attr,
1398 				  struct ib_udata *udata)
1399 {
1400 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1401 	struct bnxt_re_dev *rdev = pd->rdev;
1402 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1403 	struct bnxt_re_srq *srq;
1404 	struct bnxt_qplib_nq *nq = NULL;
1405 	int rc, entries;
1406 
1407 	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1408 		dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded");
1409 		rc = -EINVAL;
1410 		goto exit;
1411 	}
1412 
1413 	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1414 		rc = -ENOTSUPP;
1415 		goto exit;
1416 	}
1417 
1418 	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1419 	if (!srq) {
1420 		rc = -ENOMEM;
1421 		goto exit;
1422 	}
1423 	srq->rdev = rdev;
1424 	srq->qplib_srq.pd = &pd->qplib_pd;
1425 	srq->qplib_srq.dpi = &rdev->dpi_privileged;
1426 	/* Allocate 1 more than what's provided so posting max doesn't
1427 	 * mean empty
1428 	 */
1429 	entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1430 	if (entries > dev_attr->max_srq_wqes + 1)
1431 		entries = dev_attr->max_srq_wqes + 1;
1432 
1433 	srq->qplib_srq.max_wqe = entries;
1434 	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1435 	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1436 	srq->srq_limit = srq_init_attr->attr.srq_limit;
1437 	srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1438 	nq = &rdev->nq[0];
1439 
1440 	if (udata) {
1441 		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1442 		if (rc)
1443 			goto fail;
1444 	}
1445 
1446 	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1447 	if (rc) {
1448 		dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
1449 		goto fail;
1450 	}
1451 
1452 	if (udata) {
1453 		struct bnxt_re_srq_resp resp;
1454 
1455 		resp.srqid = srq->qplib_srq.id;
1456 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1457 		if (rc) {
1458 			dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
1459 			bnxt_qplib_destroy_srq(&rdev->qplib_res,
1460 					       &srq->qplib_srq);
1461 			goto exit;
1462 		}
1463 	}
1464 	if (nq)
1465 		nq->budget++;
1466 	atomic_inc(&rdev->srq_count);
1467 
1468 	return &srq->ib_srq;
1469 
1470 fail:
1471 	if (srq->umem)
1472 		ib_umem_release(srq->umem);
1473 	kfree(srq);
1474 exit:
1475 	return ERR_PTR(rc);
1476 }
1477 
1478 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1479 		       enum ib_srq_attr_mask srq_attr_mask,
1480 		       struct ib_udata *udata)
1481 {
1482 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1483 					       ib_srq);
1484 	struct bnxt_re_dev *rdev = srq->rdev;
1485 	int rc;
1486 
1487 	switch (srq_attr_mask) {
1488 	case IB_SRQ_MAX_WR:
1489 		/* SRQ resize is not supported */
1490 		break;
1491 	case IB_SRQ_LIMIT:
1492 		/* Change the SRQ threshold */
1493 		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1494 			return -EINVAL;
1495 
1496 		srq->qplib_srq.threshold = srq_attr->srq_limit;
1497 		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1498 		if (rc) {
1499 			dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
1500 			return rc;
1501 		}
1502 		/* On success, update the shadow */
1503 		srq->srq_limit = srq_attr->srq_limit;
1504 		/* No need to build and send a response back to udata */
1505 		break;
1506 	default:
1507 		dev_err(rdev_to_dev(rdev),
1508 			"Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1509 		return -EINVAL;
1510 	}
1511 	return 0;
1512 }
1513 
1514 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1515 {
1516 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1517 					       ib_srq);
1518 	struct bnxt_re_srq tsrq;
1519 	struct bnxt_re_dev *rdev = srq->rdev;
1520 	int rc;
1521 
1522 	/* Get live SRQ attr */
1523 	tsrq.qplib_srq.id = srq->qplib_srq.id;
1524 	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1525 	if (rc) {
1526 		dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
1527 		return rc;
1528 	}
1529 	srq_attr->max_wr = srq->qplib_srq.max_wqe;
1530 	srq_attr->max_sge = srq->qplib_srq.max_sge;
1531 	srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1532 
1533 	return 0;
1534 }
1535 
1536 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
1537 			  struct ib_recv_wr **bad_wr)
1538 {
1539 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1540 					       ib_srq);
1541 	struct bnxt_qplib_swqe wqe;
1542 	unsigned long flags;
1543 	int rc = 0, payload_sz = 0;
1544 
1545 	spin_lock_irqsave(&srq->lock, flags);
1546 	while (wr) {
1547 		/* Transcribe each ib_recv_wr to qplib_swqe */
1548 		wqe.num_sge = wr->num_sge;
1549 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
1550 					       wr->num_sge);
1551 		wqe.wr_id = wr->wr_id;
1552 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1553 
1554 		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1555 		if (rc) {
1556 			*bad_wr = wr;
1557 			break;
1558 		}
1559 		wr = wr->next;
1560 	}
1561 	spin_unlock_irqrestore(&srq->lock, flags);
1562 
1563 	return rc;
1564 }
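
/* Keep the shadow GSI QP in lockstep with QP1: whenever QP1 is
 * modified, mirror the state, pkey index and SQ PSN onto the shadow
 * QP (with a driver-chosen QKEY) so both QPs move through the state
 * machine together.
 */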
1565 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1566 				    struct bnxt_re_qp *qp1_qp,
1567 				    int qp_attr_mask)
1568 {
1569 	struct bnxt_re_qp *qp = rdev->qp1_sqp;
1570 	int rc = 0;
1571 
1572 	if (qp_attr_mask & IB_QP_STATE) {
1573 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1574 		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1575 	}
1576 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1577 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1578 		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1579 	}
1580 
1581 	if (qp_attr_mask & IB_QP_QKEY) {
1582 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1583 		/* Use a random QKEY */
1584 		qp->qplib_qp.qkey = 0x81818181;
1585 	}
1586 	if (qp_attr_mask & IB_QP_SQ_PSN) {
1587 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1588 		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1589 	}
1590 
1591 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1592 	if (rc)
1593 		dev_err(rdev_to_dev(rdev),
1594 			"Failed to modify Shadow QP for QP1");
1595 	return rc;
1596 }
1597 
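/* Translate the verbs modify-QP attributes into a qplib modify
 * command. Kernel QPs moving to the ERR state are also put on the CQ
 * flush lists (and taken off again on a move to RESET) so outstanding
 * work requests complete with flush errors.
 */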
1598 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1599 		      int qp_attr_mask, struct ib_udata *udata)
1600 {
1601 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1602 	struct bnxt_re_dev *rdev = qp->rdev;
1603 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1604 	enum ib_qp_state curr_qp_state, new_qp_state;
1605 	int rc, entries;
1606 	int status;
1607 	union ib_gid sgid;
1608 	struct ib_gid_attr sgid_attr;
1609 	unsigned int flags;
1610 	u8 nw_type;
1611 
1612 	qp->qplib_qp.modify_flags = 0;
1613 	if (qp_attr_mask & IB_QP_STATE) {
1614 		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1615 		new_qp_state = qp_attr->qp_state;
1616 		if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1617 					ib_qp->qp_type, qp_attr_mask,
1618 					IB_LINK_LAYER_ETHERNET)) {
1619 			dev_err(rdev_to_dev(rdev),
1620 				"Invalid attribute mask: %#x specified ",
1621 				qp_attr_mask);
1622 			dev_err(rdev_to_dev(rdev),
1623 				"for qpn: %#x type: %#x",
1624 				ib_qp->qp_num, ib_qp->qp_type);
1625 			dev_err(rdev_to_dev(rdev),
1626 				"curr_qp_state=0x%x, new_qp_state=0x%x\n",
1627 				curr_qp_state, new_qp_state);
1628 			return -EINVAL;
1629 		}
1630 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1631 		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1632 
1633 		if (!qp->sumem &&
1634 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1635 			dev_dbg(rdev_to_dev(rdev),
1636 				"Move QP = %p to flush list\n",
1637 				qp);
1638 			flags = bnxt_re_lock_cqs(qp);
1639 			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1640 			bnxt_re_unlock_cqs(qp, flags);
1641 		}
1642 		if (!qp->sumem &&
1643 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1644 			dev_dbg(rdev_to_dev(rdev),
1645 				"Move QP = %p out of flush list\n",
1646 				qp);
1647 			flags = bnxt_re_lock_cqs(qp);
1648 			bnxt_qplib_clean_qp(&qp->qplib_qp);
1649 			bnxt_re_unlock_cqs(qp, flags);
1650 		}
1651 	}
1652 	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1653 		qp->qplib_qp.modify_flags |=
1654 				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1655 		qp->qplib_qp.en_sqd_async_notify = true;
1656 	}
1657 	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1658 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1659 		qp->qplib_qp.access =
1660 			__from_ib_access_flags(qp_attr->qp_access_flags);
1661 		/* LOCAL_WRITE access must be set to allow RC receive */
1662 		qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1663 	}
1664 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1665 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1666 		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1667 	}
1668 	if (qp_attr_mask & IB_QP_QKEY) {
1669 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1670 		qp->qplib_qp.qkey = qp_attr->qkey;
1671 	}
1672 	if (qp_attr_mask & IB_QP_AV) {
1673 		const struct ib_global_route *grh =
1674 			rdma_ah_read_grh(&qp_attr->ah_attr);
1675 
1676 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1677 				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1678 				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1679 				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1680 				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1681 				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1682 				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1683 		memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1684 		       sizeof(qp->qplib_qp.ah.dgid.data));
1685 		qp->qplib_qp.ah.flow_label = grh->flow_label;
1686 		/* If RoCE V2 is enabled, the stack will have two entries for
1687 		 * each GID entry. Avoid this duplicate entry in HW by dividing
1688 		 * the GID index by 2 for RoCE V2.
1689 		 */
1690 		qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1691 		qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1692 		qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1693 		qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1694 		qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1695 		ether_addr_copy(qp->qplib_qp.ah.dmac,
1696 				qp_attr->ah_attr.roce.dmac);
1697 
1698 		status = ib_get_cached_gid(&rdev->ibdev, 1,
1699 					   grh->sgid_index,
1700 					   &sgid, &sgid_attr);
1701 		if (!status && sgid_attr.ndev) {
1702 			memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
1703 			       ETH_ALEN);
1704 			dev_put(sgid_attr.ndev);
1705 			nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
1706 							 &sgid);
1707 			switch (nw_type) {
1708 			case RDMA_NETWORK_IPV4:
1709 				qp->qplib_qp.nw_type =
1710 					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1711 				break;
1712 			case RDMA_NETWORK_IPV6:
1713 				qp->qplib_qp.nw_type =
1714 					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1715 				break;
1716 			default:
1717 				qp->qplib_qp.nw_type =
1718 					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1719 				break;
1720 			}
1721 		}
1722 	}
1723 
1724 	if (qp_attr_mask & IB_QP_PATH_MTU) {
1725 		qp->qplib_qp.modify_flags |=
1726 				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1727 		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1728 		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1729 	} else if (qp_attr->qp_state == IB_QPS_RTR) {
1730 		qp->qplib_qp.modify_flags |=
1731 			CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1732 		qp->qplib_qp.path_mtu =
1733 			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1734 		qp->qplib_qp.mtu =
1735 			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1736 	}
1737 
1738 	if (qp_attr_mask & IB_QP_TIMEOUT) {
1739 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1740 		qp->qplib_qp.timeout = qp_attr->timeout;
1741 	}
1742 	if (qp_attr_mask & IB_QP_RETRY_CNT) {
1743 		qp->qplib_qp.modify_flags |=
1744 				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1745 		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1746 	}
1747 	if (qp_attr_mask & IB_QP_RNR_RETRY) {
1748 		qp->qplib_qp.modify_flags |=
1749 				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1750 		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1751 	}
1752 	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1753 		qp->qplib_qp.modify_flags |=
1754 				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1755 		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1756 	}
1757 	if (qp_attr_mask & IB_QP_RQ_PSN) {
1758 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1759 		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1760 	}
1761 	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1762 		qp->qplib_qp.modify_flags |=
1763 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1764 		/* Cap the max_rd_atomic to device max */
1765 		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1766 						   dev_attr->max_qp_rd_atom);
1767 	}
1768 	if (qp_attr_mask & IB_QP_SQ_PSN) {
1769 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1770 		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1771 	}
1772 	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1773 		if (qp_attr->max_dest_rd_atomic >
1774 		    dev_attr->max_qp_init_rd_atom) {
1775 			dev_err(rdev_to_dev(rdev),
1776 				"max_dest_rd_atomic requested %d is > dev_max %d",
1777 				qp_attr->max_dest_rd_atomic,
1778 				dev_attr->max_qp_init_rd_atom);
1779 			return -EINVAL;
1780 		}
1781 
1782 		qp->qplib_qp.modify_flags |=
1783 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1784 		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1785 	}
1786 	if (qp_attr_mask & IB_QP_CAP) {
1787 		qp->qplib_qp.modify_flags |=
1788 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1789 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1790 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1791 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1792 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1793 		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1794 		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1795 		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1796 		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1797 		    (qp_attr->cap.max_inline_data >=
1798 						dev_attr->max_inline_data)) {
1799 			dev_err(rdev_to_dev(rdev),
1800 				"Modify QP failed - max exceeded");
1801 			return -EINVAL;
1802 		}
1803 		entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1804 		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1805 						dev_attr->max_qp_wqes + 1);
1806 		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1807 						qp_attr->cap.max_send_wr;
1808 		/*
1809 		 * Reserve one slot for the phantom WQE. Some applications can
1810 		 * post one extra entry in this case; allow for it to avoid an
1811 		 * unexpected queue-full condition.
1812 		 */
1813 		qp->qplib_qp.sq.q_full_delta -= 1;
1814 		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1815 		if (qp->qplib_qp.rq.max_wqe) {
1816 			entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1817 			qp->qplib_qp.rq.max_wqe =
1818 				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1819 			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1820 						       qp_attr->cap.max_recv_wr;
1821 			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1822 		} else {
1823 			/* An SRQ is in use; ignore the RQ caps */
1824 		}
1825 	}
1826 	if (qp_attr_mask & IB_QP_DEST_QPN) {
1827 		qp->qplib_qp.modify_flags |=
1828 				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1829 		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1830 	}
1831 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1832 	if (rc) {
1833 		dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1834 		return rc;
1835 	}
1836 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1837 		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1838 	return rc;
1839 }
1840 
1841 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1842 		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1843 {
1844 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1845 	struct bnxt_re_dev *rdev = qp->rdev;
1846 	struct bnxt_qplib_qp *qplib_qp;
1847 	int rc;
1848 
1849 	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
1850 	if (!qplib_qp)
1851 		return -ENOMEM;
1852 
1853 	qplib_qp->id = qp->qplib_qp.id;
1854 	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1855 
1856 	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
1857 	if (rc) {
1858 		dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1859 		goto out;
1860 	}
1861 	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
1862 	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
1863 	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
1864 	qp_attr->pkey_index = qplib_qp->pkey_index;
1865 	qp_attr->qkey = qplib_qp->qkey;
1866 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1867 	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
1868 			qplib_qp->ah.host_sgid_index,
1869 			qplib_qp->ah.hop_limit,
1870 			qplib_qp->ah.traffic_class);
1871 	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
1872 	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
1873 	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
1874 	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
1875 	qp_attr->timeout = qplib_qp->timeout;
1876 	qp_attr->retry_cnt = qplib_qp->retry_cnt;
1877 	qp_attr->rnr_retry = qplib_qp->rnr_retry;
1878 	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
1879 	qp_attr->rq_psn = qplib_qp->rq.psn;
1880 	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
1881 	qp_attr->sq_psn = qplib_qp->sq.psn;
1882 	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
1883 	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
1884 							 IB_SIGNAL_REQ_WR;
1885 	qp_attr->dest_qp_num = qplib_qp->dest_qpn;
1886 
1887 	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1888 	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1889 	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1890 	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1891 	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1892 	qp_init_attr->cap = qp_attr->cap;
1893 
1894 out:
1895 	kfree(qplib_qp);
1896 	return rc;
1897 }
1898 
1899 /* Routine for sending QP1 packets for RoCE V1 and V2 */
1901 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1902 				     struct ib_send_wr *wr,
1903 				     struct bnxt_qplib_swqe *wqe,
1904 				     int payload_size)
1905 {
1906 	struct ib_device *ibdev = &qp->rdev->ibdev;
1907 	struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1908 					     ib_ah);
1909 	struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1910 	struct bnxt_qplib_sge sge;
1911 	union ib_gid sgid;
1912 	u8 nw_type;
1913 	u16 ether_type;
1914 	struct ib_gid_attr sgid_attr;
1915 	union ib_gid dgid;
1916 	bool is_eth = false;
1917 	bool is_vlan = false;
1918 	bool is_grh = false;
1919 	bool is_udp = false;
1920 	u8 ip_version = 0;
1921 	u16 vlan_id = 0xFFFF;
1922 	void *buf;
1923 	int i, rc = 0;
1924 
1925 	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1926 
1927 	rc = ib_get_cached_gid(ibdev, 1,
1928 			       qplib_ah->host_sgid_index, &sgid,
1929 			       &sgid_attr);
1930 	if (rc) {
1931 		dev_err(rdev_to_dev(qp->rdev),
1932 			"Failed to query gid at index %d",
1933 			qplib_ah->host_sgid_index);
1934 		return rc;
1935 	}
1936 	if (sgid_attr.ndev) {
1937 		if (is_vlan_dev(sgid_attr.ndev))
1938 			vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1939 		dev_put(sgid_attr.ndev);
1940 	}
1941 	/* Get network header type for this GID */
1942 	nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1943 	switch (nw_type) {
1944 	case RDMA_NETWORK_IPV4:
1945 		nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1946 		break;
1947 	case RDMA_NETWORK_IPV6:
1948 		nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1949 		break;
1950 	default:
1951 		nw_type = BNXT_RE_ROCE_V1_PACKET;
1952 		break;
1953 	}
1954 	memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1955 	is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1956 	if (is_udp) {
1957 		if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1958 			ip_version = 4;
1959 			ether_type = ETH_P_IP;
1960 		} else {
1961 			ip_version = 6;
1962 			ether_type = ETH_P_IPV6;
1963 		}
1964 		is_grh = false;
1965 	} else {
1966 		ether_type = ETH_P_IBOE;
1967 		is_grh = true;
1968 	}
1969 
1970 	is_eth = true;
1971 	is_vlan = vlan_id && (vlan_id < 0x1000);
1972 
1973 	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1974 			  ip_version, is_udp, 0, &qp->qp1_hdr);
1975 
1976 	/* ETH */
1977 	ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1978 	ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1979 
1980 	/* For VLAN: the tag, if any, was taken from the SGID's netdev above */
1981 
1982 	if (!is_vlan) {
1983 		qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1984 	} else {
1985 		qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1986 		qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1987 	}
1988 
1989 	if (is_grh || (ip_version == 6)) {
1990 		memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1991 		memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1992 		       sizeof(sgid));
1993 		qp->qp1_hdr.grh.hop_limit     = qplib_ah->hop_limit;
1994 	}
1995 
1996 	if (ip_version == 4) {
1997 		qp->qp1_hdr.ip4.tos = 0;
1998 		qp->qp1_hdr.ip4.id = 0;
1999 		qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
2000 		qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
2001 
2002 		memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
2003 		memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2004 		qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2005 	}
2006 
2007 	if (is_udp) {
2008 		qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
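		/* The source port is a fixed value here; RoCE v2 allows an
		 * arbitrary UDP source port (often used for path entropy).
		 */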
2009 		qp->qp1_hdr.udp.sport = htons(0x8CD1);
2010 		qp->qp1_hdr.udp.csum = 0;
2011 	}
2012 
2013 	/* BTH */
2014 	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2015 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2016 		qp->qp1_hdr.immediate_present = 1;
2017 	} else {
2018 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2019 	}
2020 	if (wr->send_flags & IB_SEND_SOLICITED)
2021 		qp->qp1_hdr.bth.solicited_event = 1;
2022 	/* pad_count: bytes needed to round the payload up to a 4-byte multiple */
2023 	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2024 
2025 	/* P_key for QP1 is for all members */
2026 	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2027 	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2028 	qp->qp1_hdr.bth.ack_req = 0;
2029 	qp->send_psn++;
2030 	qp->send_psn &= BTH_PSN_MASK;
2031 	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2032 	/* DETH */
2033 	/* Use the privileged Q_Key for QP1 */
2034 	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2035 	qp->qp1_hdr.deth.source_qpn = IB_QP1;
2036 
2037 	/* Pack the QP1 to the transmit buffer */
2038 	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2039 	if (buf) {
2040 		ib_ud_header_pack(&qp->qp1_hdr, buf);
2041 		for (i = wqe->num_sge; i; i--) {
2042 			wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2043 			wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2044 			wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2045 		}
2046 
2047 		/*
2048 		 * Max Header buf size for IPV6 RoCE V2 is 86,
2049 		 * which is the same as the QP1 SQ header buffer.
2050 		 * Header buf size for IPV4 RoCE V2 can be 66:
2051 		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
2052 		 * Subtract 20 bytes from the QP1 SQ header buf size.
2053 		 */
2054 		if (is_udp && ip_version == 4)
2055 			sge.size -= 20;
2056 		/*
2057 		 * Max Header buf size for RoCE V1 is 78.
2058 		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2059 		 * Subtract 8 bytes from QP1 SQ header buf size
2060 		 */
2061 		if (!is_udp)
2062 			sge.size -= 8;
2063 
2064 		/* Subtract 4 bytes for non vlan packets */
2065 		if (!is_vlan)
2066 			sge.size -= 4;
2067 
2068 		wqe->sg_list[0].addr = sge.addr;
2069 		wqe->sg_list[0].lkey = sge.lkey;
2070 		wqe->sg_list[0].size = sge.size;
2071 		wqe->num_sge++;
2072 
2073 	} else {
2074 		dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
2075 		rc = -ENOMEM;
2076 	}
2077 	return rc;
2078 }
2079 
2080 /* The MAD layer only provides a recv SGE sized for the
2081  * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH,
2082  * nor RoCE iCRC. The Cu+ solution must provide a buffer for the entire
2083  * receive packet (334 bytes) with no VLAN and then copy the GRH
2084  * and the MAD datagram out to the provided SGE.
2085  */
2086 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2087 					    struct ib_recv_wr *wr,
2088 					    struct bnxt_qplib_swqe *wqe,
2089 					    int payload_size)
2090 {
2091 	struct bnxt_qplib_sge ref, sge;
2092 	u32 rq_prod_index;
2093 	struct bnxt_re_sqp_entries *sqp_entry;
2094 
2095 	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2096 
2097 	if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2098 		return -ENOMEM;
2099 
2100 	/* Create 1 SGE to receive the entire Ethernet packet */
2103 	/* Save the reference from ULP */
2104 	ref.addr = wqe->sg_list[0].addr;
2105 	ref.lkey = wqe->sg_list[0].lkey;
2106 	ref.size = wqe->sg_list[0].size;
2107 
2108 	sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
2109 
2110 	/* SGE 1 */
2111 	wqe->sg_list[0].addr = sge.addr;
2112 	wqe->sg_list[0].lkey = sge.lkey;
2113 	wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2114 	sge.size -= wqe->sg_list[0].size;
2115 
2116 	sqp_entry->sge.addr = ref.addr;
2117 	sqp_entry->sge.lkey = ref.lkey;
2118 	sqp_entry->sge.size = ref.size;
2119 	/* Store the wrid for reporting completion */
2120 	sqp_entry->wrid = wqe->wr_id;
2121 	/* change the wqe->wrid to table index */
2122 	/* Change the wqe's wr_id to the table index */
2123 	return 0;
2124 }
2125 
2126 static int is_ud_qp(struct bnxt_re_qp *qp)
2127 {
2128 	return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
2129 }
2130 
2131 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2132 				  struct ib_send_wr *wr,
2133 				  struct bnxt_qplib_swqe *wqe)
2134 {
2135 	struct bnxt_re_ah *ah = NULL;
2136 
2137 	if (is_ud_qp(qp)) {
2138 		ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2139 		wqe->send.q_key = ud_wr(wr)->remote_qkey;
2140 		wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2141 		wqe->send.avid = ah->qplib_ah.id;
2142 	}
2143 	switch (wr->opcode) {
2144 	case IB_WR_SEND:
2145 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2146 		break;
2147 	case IB_WR_SEND_WITH_IMM:
2148 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2149 		wqe->send.imm_data = wr->ex.imm_data;
2150 		break;
2151 	case IB_WR_SEND_WITH_INV:
2152 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2153 		wqe->send.inv_key = wr->ex.invalidate_rkey;
2154 		break;
2155 	default:
2156 		return -EINVAL;
2157 	}
2158 	if (wr->send_flags & IB_SEND_SIGNALED)
2159 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2160 	if (wr->send_flags & IB_SEND_FENCE)
2161 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2162 	if (wr->send_flags & IB_SEND_SOLICITED)
2163 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2164 	if (wr->send_flags & IB_SEND_INLINE)
2165 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2166 
2167 	return 0;
2168 }
2169 
2170 static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
2171 				  struct bnxt_qplib_swqe *wqe)
2172 {
2173 	switch (wr->opcode) {
2174 	case IB_WR_RDMA_WRITE:
2175 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2176 		break;
2177 	case IB_WR_RDMA_WRITE_WITH_IMM:
2178 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2179 		wqe->rdma.imm_data = wr->ex.imm_data;
2180 		break;
2181 	case IB_WR_RDMA_READ:
2182 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2183 		wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2184 		break;
2185 	default:
2186 		return -EINVAL;
2187 	}
2188 	wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2189 	wqe->rdma.r_key = rdma_wr(wr)->rkey;
2190 	if (wr->send_flags & IB_SEND_SIGNALED)
2191 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2192 	if (wr->send_flags & IB_SEND_FENCE)
2193 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2194 	if (wr->send_flags & IB_SEND_SOLICITED)
2195 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2196 	if (wr->send_flags & IB_SEND_INLINE)
2197 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2198 
2199 	return 0;
2200 }
2201 
2202 static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
2203 				    struct bnxt_qplib_swqe *wqe)
2204 {
2205 	switch (wr->opcode) {
2206 	case IB_WR_ATOMIC_CMP_AND_SWP:
2207 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2208 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2209 		wqe->atomic.swap_data = atomic_wr(wr)->swap;
2210 		break;
2211 	case IB_WR_ATOMIC_FETCH_AND_ADD:
2212 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2213 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2214 		break;
2215 	default:
2216 		return -EINVAL;
2217 	}
2218 	wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2219 	wqe->atomic.r_key = atomic_wr(wr)->rkey;
2220 	if (wr->send_flags & IB_SEND_SIGNALED)
2221 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2222 	if (wr->send_flags & IB_SEND_FENCE)
2223 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2224 	if (wr->send_flags & IB_SEND_SOLICITED)
2225 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2226 	return 0;
2227 }
2228 
2229 static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
2230 				 struct bnxt_qplib_swqe *wqe)
2231 {
2232 	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2233 	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2234 
2235 	/* Need unconditional fence for local invalidate
2236 	 * opcode to work as expected.
2237 	 */
2238 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2239 
2240 	if (wr->send_flags & IB_SEND_SIGNALED)
2241 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2242 	if (wr->send_flags & IB_SEND_SOLICITED)
2243 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2244 
2245 	return 0;
2246 }
2247 
2248 static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
2249 				 struct bnxt_qplib_swqe *wqe)
2250 {
2251 	struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2252 	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2253 	int access = wr->access;
2254 
2255 	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2256 	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2257 	wqe->frmr.page_list = mr->pages;
2258 	wqe->frmr.page_list_len = mr->npages;
2259 	wqe->frmr.levels = qplib_frpl->hwq.level + 1;
2260 	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2261 
2262 	/* Need unconditional fence for reg_mr
2263 	 * opcode to function as expected.
2264 	 */
2265 
2266 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2267 
2268 	if (wr->wr.send_flags & IB_SEND_SIGNALED)
2269 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2270 
2271 	if (access & IB_ACCESS_LOCAL_WRITE)
2272 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2273 	if (access & IB_ACCESS_REMOTE_READ)
2274 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2275 	if (access & IB_ACCESS_REMOTE_WRITE)
2276 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2277 	if (access & IB_ACCESS_REMOTE_ATOMIC)
2278 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2279 	if (access & IB_ACCESS_MW_BIND)
2280 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2281 
2282 	wqe->frmr.l_key = wr->key;
2283 	wqe->frmr.length = wr->mr->length;
2284 	wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2285 	wqe->frmr.va = wr->mr->iova;
2286 	return 0;
2287 }
2288 
2289 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2290 				    struct ib_send_wr *wr,
2291 				    struct bnxt_qplib_swqe *wqe)
2292 {
2293 	/* Copy the inline data to the data field */
2294 	u8 *in_data;
2295 	u32 i, sge_len;
2296 	void *sge_addr;
2297 
2298 	in_data = wqe->inline_data;
2299 	for (i = 0; i < wr->num_sge; i++) {
2300 		sge_addr = (void *)(unsigned long)
2301 				wr->sg_list[i].addr;
2302 		sge_len = wr->sg_list[i].length;
2303 
2304 		if ((sge_len + wqe->inline_len) >
2305 		    BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2306 			dev_err(rdev_to_dev(rdev),
2307 				"Inline data size requested > supported value");
2308 			return -EINVAL;
2309 		}
2311 
2312 		memcpy(in_data, sge_addr, sge_len);
2313 		in_data += wr->sg_list[i].length;
2314 		wqe->inline_len += wr->sg_list[i].length;
2315 	}
2316 	return wqe->inline_len;
2317 }
2318 
2319 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2320 				   struct ib_send_wr *wr,
2321 				   struct bnxt_qplib_swqe *wqe)
2322 {
2323 	int payload_sz = 0;
2324 
2325 	if (wr->send_flags & IB_SEND_INLINE)
2326 		payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2327 	else
2328 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2329 					       wqe->num_sge);
2330 
2331 	return payload_sz;
2332 }
2333 
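/* UD/GSI/raw-Ethertype QPs can stall the HW after enough back-to-back
 * WQEs; once wqe_cnt reaches BNXT_RE_UD_QP_HW_STALL, nudge the QP with
 * a modify-to-RTS and reset the counter.
 */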
2334 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2335 {
2336 	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2337 	     qp->ib_qp.qp_type == IB_QPT_GSI ||
2338 	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2339 	     qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2340 		int qp_attr_mask;
2341 		struct ib_qp_attr qp_attr;
2342 
2343 		qp_attr_mask = IB_QP_STATE;
2344 		qp_attr.qp_state = IB_QPS_RTS;
2345 		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2346 		qp->qplib_qp.wqe_cnt = 0;
2347 	}
2348 }
2349 
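/* Kernel-internal post-send on the shadow GSI QP, used to relay QP1
 * traffic; the SQ doorbell is rung once after the whole WR chain.
 */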
2350 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2351 				       struct bnxt_re_qp *qp,
2352 				struct ib_send_wr *wr)
2353 {
2354 	struct bnxt_qplib_swqe wqe;
2355 	int rc = 0, payload_sz = 0;
2356 	unsigned long flags;
2357 
2358 	spin_lock_irqsave(&qp->sq_lock, flags);
2359 	memset(&wqe, 0, sizeof(wqe));
2360 	while (wr) {
2361 		/* Housekeeping */
2362 		memset(&wqe, 0, sizeof(wqe));
2363 
2364 		/* Common */
2365 		wqe.num_sge = wr->num_sge;
2366 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2367 			dev_err(rdev_to_dev(rdev),
2368 				"Limit exceeded for Send SGEs");
2369 			rc = -EINVAL;
2370 			goto bad;
2371 		}
2372 
2373 		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2374 		if (payload_sz < 0) {
2375 			rc = -EINVAL;
2376 			goto bad;
2377 		}
2378 		wqe.wr_id = wr->wr_id;
2379 
2380 		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2381 
2382 		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2383 		if (!rc)
2384 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2385 bad:
2386 		if (rc) {
2387 			dev_err(rdev_to_dev(rdev),
2388 				"Post send failed opcode = %#x rc = %d",
2389 				wr->opcode, rc);
2390 			break;
2391 		}
2392 		wr = wr->next;
2393 	}
2394 	bnxt_qplib_post_send_db(&qp->qplib_qp);
2395 	bnxt_ud_qp_hw_stall_workaround(qp);
2396 	spin_unlock_irqrestore(&qp->sq_lock, flags);
2397 	return rc;
2398 }
2399 
2400 int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2401 		      struct ib_send_wr **bad_wr)
2402 {
2403 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2404 	struct bnxt_qplib_swqe wqe;
2405 	int rc = 0, payload_sz = 0;
2406 	unsigned long flags;
2407 
2408 	spin_lock_irqsave(&qp->sq_lock, flags);
2409 	while (wr) {
2410 		/* Housekeeping */
2411 		memset(&wqe, 0, sizeof(wqe));
2412 
2413 		/* Common */
2414 		wqe.num_sge = wr->num_sge;
2415 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2416 			dev_err(rdev_to_dev(qp->rdev),
2417 				"Limit exceeded for Send SGEs");
2418 			rc = -EINVAL;
2419 			goto bad;
2420 		}
2421 
2422 		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2423 		if (payload_sz < 0) {
2424 			rc = -EINVAL;
2425 			goto bad;
2426 		}
2427 		wqe.wr_id = wr->wr_id;
2428 
2429 		switch (wr->opcode) {
2430 		case IB_WR_SEND:
2431 		case IB_WR_SEND_WITH_IMM:
2432 			if (ib_qp->qp_type == IB_QPT_GSI) {
2433 				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2434 							       payload_sz);
2435 				if (rc)
2436 					goto bad;
2437 				wqe.rawqp1.lflags |=
2438 					SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2439 			}
2440 			/* send_flags is a bitmask, so test the bit rather
2441 			 * than switching on the whole flags value.
2442 			 */
2443 			if (wr->send_flags & IB_SEND_IP_CSUM)
2444 				wqe.rawqp1.lflags |=
2445 					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2448 			/* Fall thru to build the wqe */
2449 		case IB_WR_SEND_WITH_INV:
2450 			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2451 			break;
2452 		case IB_WR_RDMA_WRITE:
2453 		case IB_WR_RDMA_WRITE_WITH_IMM:
2454 		case IB_WR_RDMA_READ:
2455 			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2456 			break;
2457 		case IB_WR_ATOMIC_CMP_AND_SWP:
2458 		case IB_WR_ATOMIC_FETCH_AND_ADD:
2459 			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2460 			break;
2461 		case IB_WR_RDMA_READ_WITH_INV:
2462 			dev_err(rdev_to_dev(qp->rdev),
2463 				"RDMA Read with Invalidate is not supported");
2464 			rc = -EINVAL;
2465 			goto bad;
2466 		case IB_WR_LOCAL_INV:
2467 			rc = bnxt_re_build_inv_wqe(wr, &wqe);
2468 			break;
2469 		case IB_WR_REG_MR:
2470 			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2471 			break;
2472 		default:
2473 			/* Unsupported WRs */
2474 			dev_err(rdev_to_dev(qp->rdev),
2475 				"WR (%#x) is not supported", wr->opcode);
2476 			rc = -EINVAL;
2477 			goto bad;
2478 		}
2479 		if (!rc)
2480 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2481 bad:
2482 		if (rc) {
2483 			dev_err(rdev_to_dev(qp->rdev),
2484 				"post_send failed op:%#x qps = %#x rc = %d\n",
2485 				wr->opcode, qp->qplib_qp.state, rc);
2486 			*bad_wr = wr;
2487 			break;
2488 		}
2489 		wr = wr->next;
2490 	}
2491 	bnxt_qplib_post_send_db(&qp->qplib_qp);
2492 	bnxt_ud_qp_hw_stall_workaround(qp);
2493 	spin_unlock_irqrestore(&qp->sq_lock, flags);
2494 
2495 	return rc;
2496 }
2497 
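/* Kernel-internal post-recv on the shadow GSI QP; the RQ doorbell is
 * rung only after the whole WR chain has been queued successfully.
 */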
2498 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2499 				       struct bnxt_re_qp *qp,
2500 				       struct ib_recv_wr *wr)
2501 {
2502 	struct bnxt_qplib_swqe wqe;
2503 	int rc = 0;
2504 
2505 	memset(&wqe, 0, sizeof(wqe));
2506 	while (wr) {
2507 		/* Housekeeping */
2508 		memset(&wqe, 0, sizeof(wqe));
2509 
2510 		/* Common */
2511 		wqe.num_sge = wr->num_sge;
2512 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2513 			dev_err(rdev_to_dev(rdev),
2514 				"Limit exceeded for Receive SGEs");
2515 			rc = -EINVAL;
2516 			break;
2517 		}
2518 		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2519 		wqe.wr_id = wr->wr_id;
2520 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2521 
2522 		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2523 		if (rc)
2524 			break;
2525 
2526 		wr = wr->next;
2527 	}
2528 	if (!rc)
2529 		bnxt_qplib_post_recv_db(&qp->qplib_qp);
2530 	return rc;
2531 }
2532 
2533 int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2534 		      struct ib_recv_wr **bad_wr)
2535 {
2536 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2537 	struct bnxt_qplib_swqe wqe;
2538 	int rc = 0, payload_sz = 0;
2539 	unsigned long flags;
2540 	u32 count = 0;
2541 
2542 	spin_lock_irqsave(&qp->rq_lock, flags);
2543 	while (wr) {
2544 		/* Housekeeping */
2545 		memset(&wqe, 0, sizeof(wqe));
2546 
2547 		/* Common */
2548 		wqe.num_sge = wr->num_sge;
2549 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2550 			dev_err(rdev_to_dev(qp->rdev),
2551 				"Limit exceeded for Receive SGEs");
2552 			rc = -EINVAL;
2553 			*bad_wr = wr;
2554 			break;
2555 		}
2556 
2557 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2558 					       wr->num_sge);
2559 		wqe.wr_id = wr->wr_id;
2560 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2561 
2562 		if (ib_qp->qp_type == IB_QPT_GSI)
2563 			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2564 							      payload_sz);
2565 		if (!rc)
2566 			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2567 		if (rc) {
2568 			*bad_wr = wr;
2569 			break;
2570 		}
2571 
2572 		/* Ring DB if the RQEs posted reach a threshold value */
2573 		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2574 			bnxt_qplib_post_recv_db(&qp->qplib_qp);
2575 			count = 0;
2576 		}
2577 
2578 		wr = wr->next;
2579 	}
2580 
2581 	if (count)
2582 		bnxt_qplib_post_recv_db(&qp->qplib_qp);
2583 
2584 	spin_unlock_irqrestore(&qp->rq_lock, flags);
2585 
2586 	return rc;
2587 }
2588 
2589 /* Completion Queues */
2590 int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2591 {
2592 	int rc;
2593 	struct bnxt_re_cq *cq;
2594 	struct bnxt_qplib_nq *nq;
2595 	struct bnxt_re_dev *rdev;
2596 
2597 	cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2598 	rdev = cq->rdev;
2599 	nq = cq->qplib_cq.nq;
2600 
2601 	rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2602 	if (rc) {
2603 		dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2604 		return rc;
2605 	}
2606 	if (!IS_ERR_OR_NULL(cq->umem))
2607 		ib_umem_release(cq->umem);
2608 
2609 	atomic_dec(&rdev->cq_count);
2610 	nq->budget--;
2611 	kfree(cq->cql);
2612 	kfree(cq);
2613 
2614 	return 0;
2615 }
2616 
2617 struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2618 				const struct ib_cq_init_attr *attr,
2619 				struct ib_ucontext *context,
2620 				struct ib_udata *udata)
2621 {
2622 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2623 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2624 	struct bnxt_re_cq *cq = NULL;
2625 	int rc, entries;
2626 	int cqe = attr->cqe;
2627 	struct bnxt_qplib_nq *nq = NULL;
2628 	unsigned int nq_alloc_cnt;
2629 
2630 	/* Validate CQ fields */
2631 	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2632 		dev_err(rdev_to_dev(rdev), "Failed to create CQ - max exceeded");
2633 		return ERR_PTR(-EINVAL);
2634 	}
2635 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2636 	if (!cq)
2637 		return ERR_PTR(-ENOMEM);
2638 
2639 	cq->rdev = rdev;
2640 	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2641 
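	/* The CQ ring is sized to a power of two; the extra entry is
	 * assumed to keep one slot unused so a full ring can be told
	 * apart from an empty one.
	 */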
2642 	entries = roundup_pow_of_two(cqe + 1);
2643 	if (entries > dev_attr->max_cq_wqes + 1)
2644 		entries = dev_attr->max_cq_wqes + 1;
2645 
2646 	if (context) {
2647 		struct bnxt_re_cq_req req;
2648 		struct bnxt_re_ucontext *uctx = container_of
2649 						(context,
2650 						 struct bnxt_re_ucontext,
2651 						 ib_uctx);
2652 		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2653 			rc = -EFAULT;
2654 			goto fail;
2655 		}
2656 
2657 		cq->umem = ib_umem_get(context, req.cq_va,
2658 				       entries * sizeof(struct cq_base),
2659 				       IB_ACCESS_LOCAL_WRITE, 1);
2660 		if (IS_ERR(cq->umem)) {
2661 			rc = PTR_ERR(cq->umem);
2662 			goto fail;
2663 		}
2664 		cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2665 		cq->qplib_cq.nmap = cq->umem->nmap;
2666 		cq->qplib_cq.dpi = &uctx->dpi;
2667 	} else {
2668 		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2669 		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2670 				  GFP_KERNEL);
2671 		if (!cq->cql) {
2672 			rc = -ENOMEM;
2673 			goto fail;
2674 		}
2675 
2676 		cq->qplib_cq.dpi = &rdev->dpi_privileged;
2677 		cq->qplib_cq.sghead = NULL;
2678 		cq->qplib_cq.nmap = 0;
2679 	}
2680 	/*
2681 	 * Allocate the NQ in a round-robin fashion; nq_alloc_cnt is
2682 	 * used for deriving the NQ index.
2683 	 */
2684 	nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2685 	nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2686 	cq->qplib_cq.max_wqe = entries;
2687 	cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2688 	cq->qplib_cq.nq	= nq;
2689 
2690 	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2691 	if (rc) {
2692 		dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2693 		goto fail;
2694 	}
2695 
2696 	cq->ib_cq.cqe = entries;
2697 	cq->cq_period = cq->qplib_cq.period;
2698 	nq->budget++;
2699 
2700 	atomic_inc(&rdev->cq_count);
2701 
2702 	if (context) {
2703 		struct bnxt_re_cq_resp resp;
2704 
2705 		resp.cqid = cq->qplib_cq.id;
2706 		resp.tail = cq->qplib_cq.hwq.cons;
2707 		resp.phase = cq->qplib_cq.period;
2708 		resp.rsvd = 0;
2709 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2710 		if (rc) {
2711 			dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2712 			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2713 			goto c2fail;
2714 		}
2715 	}
2716 
2717 	return &cq->ib_cq;
2718 
2719 c2fail:
2720 	if (context)
2721 		ib_umem_release(cq->umem);
2722 fail:
2723 	kfree(cq->cql);
2724 	kfree(cq);
2725 	return ERR_PTR(rc);
2726 }
2727 
2728 static u8 __req_to_ib_wc_status(u8 qstatus)
2729 {
2730 	switch (qstatus) {
2731 	case CQ_REQ_STATUS_OK:
2732 		return IB_WC_SUCCESS;
2733 	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2734 		return IB_WC_BAD_RESP_ERR;
2735 	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2736 		return IB_WC_LOC_LEN_ERR;
2737 	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2738 		return IB_WC_LOC_QP_OP_ERR;
2739 	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2740 		return IB_WC_LOC_PROT_ERR;
2741 	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2742 		return IB_WC_GENERAL_ERR;
2743 	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2744 		return IB_WC_REM_INV_REQ_ERR;
2745 	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2746 		return IB_WC_REM_ACCESS_ERR;
2747 	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2748 		return IB_WC_REM_OP_ERR;
2749 	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2750 		return IB_WC_RNR_RETRY_EXC_ERR;
2751 	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2752 		return IB_WC_RETRY_EXC_ERR;
2753 	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2754 		return IB_WC_WR_FLUSH_ERR;
2755 	default:
2756 		return IB_WC_GENERAL_ERR;
2757 	}
2759 }
2760 
2761 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2762 {
2763 	switch (qstatus) {
2764 	case CQ_RES_RAWETH_QP1_STATUS_OK:
2765 		return IB_WC_SUCCESS;
2766 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2767 		return IB_WC_LOC_ACCESS_ERR;
2768 	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2769 		return IB_WC_LOC_LEN_ERR;
2770 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2771 		return IB_WC_LOC_PROT_ERR;
2772 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2773 		return IB_WC_LOC_QP_OP_ERR;
2774 	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2775 		return IB_WC_GENERAL_ERR;
2776 	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2777 		return IB_WC_WR_FLUSH_ERR;
2778 	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2779 		return IB_WC_WR_FLUSH_ERR;
2780 	default:
2781 		return IB_WC_GENERAL_ERR;
2782 	}
2783 }
2784 
2785 static u8 __rc_to_ib_wc_status(u8 qstatus)
2786 {
2787 	switch (qstatus) {
2788 	case CQ_RES_RC_STATUS_OK:
2789 		return IB_WC_SUCCESS;
2790 	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2791 		return IB_WC_LOC_ACCESS_ERR;
2792 	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2793 		return IB_WC_LOC_LEN_ERR;
2794 	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2795 		return IB_WC_LOC_PROT_ERR;
2796 	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2797 		return IB_WC_LOC_QP_OP_ERR;
2798 	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2799 		return IB_WC_GENERAL_ERR;
2800 	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2801 		return IB_WC_REM_INV_REQ_ERR;
2802 	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2803 		return IB_WC_WR_FLUSH_ERR;
2804 	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2805 		return IB_WC_WR_FLUSH_ERR;
2806 	default:
2807 		return IB_WC_GENERAL_ERR;
2808 	}
2809 }
2810 
2811 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2812 {
2813 	switch (cqe->type) {
2814 	case BNXT_QPLIB_SWQE_TYPE_SEND:
2815 		wc->opcode = IB_WC_SEND;
2816 		break;
2817 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2818 		wc->opcode = IB_WC_SEND;
2819 		wc->wc_flags |= IB_WC_WITH_IMM;
2820 		break;
2821 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2822 		wc->opcode = IB_WC_SEND;
2823 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2824 		break;
2825 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2826 		wc->opcode = IB_WC_RDMA_WRITE;
2827 		break;
2828 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2829 		wc->opcode = IB_WC_RDMA_WRITE;
2830 		wc->wc_flags |= IB_WC_WITH_IMM;
2831 		break;
2832 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2833 		wc->opcode = IB_WC_RDMA_READ;
2834 		break;
2835 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2836 		wc->opcode = IB_WC_COMP_SWAP;
2837 		break;
2838 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2839 		wc->opcode = IB_WC_FETCH_ADD;
2840 		break;
2841 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2842 		wc->opcode = IB_WC_LOCAL_INV;
2843 		break;
2844 	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2845 		wc->opcode = IB_WC_REG_MR;
2846 		break;
2847 	default:
2848 		wc->opcode = IB_WC_SEND;
2849 		break;
2850 	}
2851 
2852 	wc->status = __req_to_ib_wc_status(cqe->status);
2853 }
2854 
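/* Classify a raw QP1 completion from the CQE flags: returns one of the
 * BNXT_RE_ROCE*_PACKET values, or -1 if the packet is not RoCE.
 */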
2855 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2856 				     u16 raweth_qp1_flags2)
2857 {
2858 	bool is_ipv6 = false, is_ipv4 = false;
2859 
2860 	/* raweth_qp1_flags bits 9-6 indicate itype */
2861 	if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2862 	    != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2863 		return -1;
2864 
2865 	if (raweth_qp1_flags2 &
2866 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2867 	    raweth_qp1_flags2 &
2868 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2869 		/* raweth_qp1_flags2 bit 8 indicates ip_type: 0 - v4, 1 - v6 */
2870 		(raweth_qp1_flags2 &
2871 		 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
2872 			(is_ipv6 = true) : (is_ipv4 = true);
2873 		return ((is_ipv6) ?
2874 			 BNXT_RE_ROCEV2_IPV6_PACKET :
2875 			 BNXT_RE_ROCEV2_IPV4_PACKET);
2876 	} else {
2877 		return BNXT_RE_ROCE_V1_PACKET;
2878 	}
2879 }
2880 
2881 static int bnxt_re_to_ib_nw_type(int nw_type)
2882 {
2883 	u8 nw_hdr_type = 0xFF;
2884 
2885 	switch (nw_type) {
2886 	case BNXT_RE_ROCE_V1_PACKET:
2887 		nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2888 		break;
2889 	case BNXT_RE_ROCEV2_IPV4_PACKET:
2890 		nw_hdr_type = RDMA_NETWORK_IPV4;
2891 		break;
2892 	case BNXT_RE_ROCEV2_IPV6_PACKET:
2893 		nw_hdr_type = RDMA_NETWORK_IPV6;
2894 		break;
2895 	}
2896 	return nw_hdr_type;
2897 }
2898 
2899 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2900 				       void *rq_hdr_buf)
2901 {
2902 	u8 *tmp_buf = NULL;
2903 	struct ethhdr *eth_hdr;
2904 	u16 eth_type;
2905 	bool rc = false;
2906 
2907 	tmp_buf = (u8 *)rq_hdr_buf;
2908 	/*
2909 	 * If the destination MAC is not the same as the I/F MAC, this
2910 	 * could be a loopback or multicast address; check whether it
2911 	 * is a loopback packet.
2912 	 */
2913 	if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2914 		tmp_buf += 4;
2915 		/* Check the ether type */
2916 		eth_hdr = (struct ethhdr *)tmp_buf;
2917 		eth_type = ntohs(eth_hdr->h_proto);
2918 		switch (eth_type) {
2919 		case ETH_P_IBOE:
2920 			rc = true;
2921 			break;
2922 		case ETH_P_IP:
2923 		case ETH_P_IPV6: {
2924 			u32 len;
2925 			struct udphdr *udp_hdr;
2926 
2927 			len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2928 						      sizeof(struct ipv6hdr));
2929 			tmp_buf += sizeof(struct ethhdr) + len;
2930 			udp_hdr = (struct udphdr *)tmp_buf;
2931 			if (ntohs(udp_hdr->dest) == ROCE_V2_UDP_DPORT)
2933 				rc = true;
2934 			break;
2935 			}
2936 		default:
2937 			break;
2938 		}
2939 	}
2940 
2941 	return rc;
2942 }
2943 
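/* Relay a raw QP1 receive onto the shadow QP: stash the original CQE,
 * repost an rx buffer on the shadow QP, then send the GRH + payload to
 * it as a UD SEND so the ULP sees a normal GSI completion.
 */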
2944 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2945 					 struct bnxt_qplib_cqe *cqe)
2946 {
2947 	struct bnxt_re_dev *rdev = qp1_qp->rdev;
2948 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
2949 	struct bnxt_re_qp *qp = rdev->qp1_sqp;
2950 	struct ib_send_wr *swr;
2951 	struct ib_ud_wr udwr;
2952 	struct ib_recv_wr rwr;
2953 	int pkt_type = 0;
2954 	u32 tbl_idx;
2955 	void *rq_hdr_buf;
2956 	dma_addr_t rq_hdr_buf_map;
2957 	dma_addr_t shrq_hdr_buf_map;
2958 	u32 offset = 0;
2959 	u32 skip_bytes = 0;
2960 	struct ib_sge s_sge[2];
2961 	struct ib_sge r_sge[2];
2962 	int rc;
2963 
2964 	memset(&udwr, 0, sizeof(udwr));
2965 	memset(&rwr, 0, sizeof(rwr));
2966 	memset(&s_sge, 0, sizeof(s_sge));
2967 	memset(&r_sge, 0, sizeof(r_sge));
2968 
2969 	swr = &udwr.wr;
2970 	tbl_idx = cqe->wr_id;
2971 
2972 	rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2973 			(tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2974 	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2975 							  tbl_idx);
2976 
2977 	/* Shadow QP header buffer */
2978 	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2979 							    tbl_idx);
2980 	sqp_entry = &rdev->sqp_tbl[tbl_idx];
2981 
2982 	/* Store this cqe */
2983 	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2984 	sqp_entry->qp1_qp = qp1_qp;
2985 
2986 	/* Find packet type from the cqe */
2987 
2988 	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2989 					     cqe->raweth_qp1_flags2);
2990 	if (pkt_type < 0) {
2991 		dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2992 		return -EINVAL;
2993 	}
2994 
2995 	/* Adjust the offset for the user buffer and post in the rq; an
	 * IPv4 RoCE V2 header is 20 bytes shorter than the 40-byte GRH.
	 */
2996 
2997 	if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2998 		offset = 20;
2999 
3000 	/*
3001 	 * QP1 loopback packet has 4 bytes of internal header before
3002 	 * ether header. Skip these four bytes.
3003 	 */
3004 	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3005 		skip_bytes = 4;
3006 
3007 	/* First send SGE. Skip the ether header */
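	/* 0xFFFFFFFF is assumed to be the reserved (privileged) lkey,
	 * which lets the shadow QP address these header buffers directly.
	 */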
3008 	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3009 			+ skip_bytes;
3010 	s_sge[0].lkey = 0xFFFFFFFF;
3011 	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3012 				BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3013 
3014 	/* Second Send SGE */
3015 	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3016 			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3017 	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3018 		s_sge[1].addr += 8;
3019 	s_sge[1].lkey = 0xFFFFFFFF;
3020 	s_sge[1].length = 256;
3021 
3022 	/* First recv SGE */
3023 
3024 	r_sge[0].addr = shrq_hdr_buf_map;
3025 	r_sge[0].lkey = 0xFFFFFFFF;
3026 	r_sge[0].length = 40;
3027 
3028 	r_sge[1].addr = sqp_entry->sge.addr + offset;
3029 	r_sge[1].lkey = sqp_entry->sge.lkey;
3030 	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3031 
3032 	/* Create receive work request */
3033 	rwr.num_sge = 2;
3034 	rwr.sg_list = r_sge;
3035 	rwr.wr_id = tbl_idx;
3036 	rwr.next = NULL;
3037 
3038 	rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
3039 	if (rc) {
3040 		dev_err(rdev_to_dev(rdev),
3041 			"Failed to post Rx buffers to shadow QP");
3042 		return -ENOMEM;
3043 	}
3044 
3045 	swr->num_sge = 2;
3046 	swr->sg_list = s_sge;
3047 	swr->wr_id = tbl_idx;
3048 	swr->opcode = IB_WR_SEND;
3049 	swr->next = NULL;
3050 
3051 	udwr.ah = &rdev->sqp_ah->ib_ah;
3052 	udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
3053 	udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
3054 
3055 	/* post data received in the send queue */
3056 	rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
3057 
3058 	return 0;
3059 }
3060 
3061 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3062 					  struct bnxt_qplib_cqe *cqe)
3063 {
3064 	wc->opcode = IB_WC_RECV;
3065 	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3066 	wc->wc_flags |= IB_WC_GRH;
3067 }
3068 
3069 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3070 				u16 *vid, u8 *sl)
3071 {
3072 	bool ret = false;
3073 	u32 metadata;
3074 	u16 tpid;
3075 
3076 	metadata = orig_cqe->raweth_qp1_metadata;
3077 	if (orig_cqe->raweth_qp1_flags2 &
3078 		CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3079 		tpid = ((metadata &
3080 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3081 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3082 		if (tpid == ETH_P_8021Q) {
3083 			*vid = metadata &
3084 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3085 			*sl = (metadata &
3086 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3087 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3088 			ret = true;
3089 		}
3090 	}
3091 
3092 	return ret;
3093 }
3094 
3095 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3096 				      struct bnxt_qplib_cqe *cqe)
3097 {
3098 	wc->opcode = IB_WC_RECV;
3099 	wc->status = __rc_to_ib_wc_status(cqe->status);
3100 
3101 	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3102 		wc->wc_flags |= IB_WC_WITH_IMM;
3103 	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3104 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3105 	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3106 	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3107 		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3108 }
3109 
3110 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
3111 					     struct ib_wc *wc,
3112 					     struct bnxt_qplib_cqe *cqe)
3113 {
3114 	struct bnxt_re_dev *rdev = qp->rdev;
3115 	struct bnxt_re_qp *qp1_qp = NULL;
3116 	struct bnxt_qplib_cqe *orig_cqe = NULL;
3117 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3118 	int nw_type;
3119 	u32 tbl_idx;
3120 	u16 vlan_id;
3121 	u8 sl;
3122 
3123 	tbl_idx = cqe->wr_id;
3124 
3125 	sqp_entry = &rdev->sqp_tbl[tbl_idx];
3126 	qp1_qp = sqp_entry->qp1_qp;
3127 	orig_cqe = &sqp_entry->cqe;
3128 
3129 	wc->wr_id = sqp_entry->wrid;
3130 	wc->byte_len = orig_cqe->length;
3131 	wc->qp = &qp1_qp->ib_qp;
3132 
3133 	wc->ex.imm_data = orig_cqe->immdata;
3134 	wc->src_qp = orig_cqe->src_qp;
3135 	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3136 	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3137 		wc->vlan_id = vlan_id;
3138 		wc->sl = sl;
3139 		wc->wc_flags |= IB_WC_WITH_VLAN;
3140 	}
3141 	wc->port_num = 1;
3142 	wc->vendor_err = orig_cqe->status;
3143 
3144 	wc->opcode = IB_WC_RECV;
3145 	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3146 	wc->wc_flags |= IB_WC_GRH;
3147 
3148 	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3149 					    orig_cqe->raweth_qp1_flags2);
3150 	if (nw_type >= 0) {
3151 		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3152 		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3153 	}
3154 }
3155 
3156 static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
3157 				      struct bnxt_qplib_cqe *cqe)
3158 {
3159 	wc->opcode = IB_WC_RECV;
3160 	wc->status = __rc_to_ib_wc_status(cqe->status);
3161 
3162 	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3163 		wc->wc_flags |= IB_WC_WITH_IMM;
3164 	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3165 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3166 	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3167 	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3168 		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3169 }
3170 
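/* Post a phantom WQE (a fence memory-window bind) on the SQ; poll_cq
 * requests this via sq->send_phantom so the SQ can make progress.
 */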
3171 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3172 {
3173 	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3174 	unsigned long flags;
3175 	int rc = 0;
3176 
3177 	spin_lock_irqsave(&qp->sq_lock, flags);
3178 
3179 	rc = bnxt_re_bind_fence_mw(lib_qp);
3180 	if (!rc) {
3181 		lib_qp->sq.phantom_wqe_cnt++;
3182 		dev_dbg(&lib_qp->sq.hwq.pdev->dev,
3183 			"qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3184 			lib_qp->id, lib_qp->sq.hwq.prod,
3185 			HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3186 			lib_qp->sq.phantom_wqe_cnt);
3187 	}
3188 
3189 	spin_unlock_irqrestore(&qp->sq_lock, flags);
3190 	return rc;
3191 }
3192 
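/* Poll up to num_entries completions: drain CQEs from the HW CQ
 * (sending a phantom WQE if the qplib layer asks for one), append any
 * software flush-list entries, and transcribe each into an ib_wc.
 */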
3193 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3194 {
3195 	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3196 	struct bnxt_re_qp *qp;
3197 	struct bnxt_qplib_cqe *cqe;
3198 	int i, ncqe, budget;
3199 	struct bnxt_qplib_q *sq;
3200 	struct bnxt_qplib_qp *lib_qp;
3201 	u32 tbl_idx;
3202 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3203 	unsigned long flags;
3204 
3205 	spin_lock_irqsave(&cq->cq_lock, flags);
3206 	budget = min_t(u32, num_entries, cq->max_cql);
3207 	num_entries = budget;
3208 	if (!cq->cql) {
3209 		dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
3210 		goto exit;
3211 	}
3212 	cqe = &cq->cql[0];
3213 	while (budget) {
3214 		lib_qp = NULL;
3215 		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3216 		if (lib_qp) {
3217 			sq = &lib_qp->sq;
3218 			if (sq->send_phantom) {
3219 				qp = container_of(lib_qp,
3220 						  struct bnxt_re_qp, qplib_qp);
3221 				if (send_phantom_wqe(qp) == -ENOMEM)
3222 					dev_err(rdev_to_dev(cq->rdev),
3223 						"Phantom failed! Scheduled to send again\n");
3224 				else
3225 					sq->send_phantom = false;
3226 			}
3227 		}
3228 		if (ncqe < budget)
3229 			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3230 							      cqe + ncqe,
3231 							      budget - ncqe);
3232 
3233 		if (!ncqe)
3234 			break;
3235 
3236 		for (i = 0; i < ncqe; i++, cqe++) {
3237 			/* Transcribe each qplib_wqe back to ib_wc */
3238 			memset(wc, 0, sizeof(*wc));
3239 
3240 			wc->wr_id = cqe->wr_id;
3241 			wc->byte_len = cqe->length;
3242 			qp = container_of
3243 				((struct bnxt_qplib_qp *)
3244 				 (unsigned long)(cqe->qp_handle),
3245 				 struct bnxt_re_qp, qplib_qp);
3246 			if (!qp) {
3247 				dev_err(rdev_to_dev(cq->rdev),
3248 					"POLL CQ : bad QP handle");
3249 				continue;
3250 			}
3251 			wc->qp = &qp->ib_qp;
3252 			wc->ex.imm_data = cqe->immdata;
3253 			wc->src_qp = cqe->src_qp;
3254 			memcpy(wc->smac, cqe->smac, ETH_ALEN);
3255 			wc->port_num = 1;
3256 			wc->vendor_err = cqe->status;
3257 
3258 			switch (cqe->opcode) {
3259 			case CQ_BASE_CQE_TYPE_REQ:
3260 				if (qp->qplib_qp.id ==
3261 				    qp->rdev->qp1_sqp->qplib_qp.id) {
3262 					/* Handle this completion with
3263 					 * the stored completion
3264 					 */
3265 					memset(wc, 0, sizeof(*wc));
3266 					continue;
3267 				}
3268 				bnxt_re_process_req_wc(wc, cqe);
3269 				break;
3270 			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3271 				if (!cqe->status) {
3272 					int rc = 0;
3273 
3274 					rc = bnxt_re_process_raw_qp_pkt_rx
3275 								(qp, cqe);
3276 					if (!rc) {
3277 						memset(wc, 0, sizeof(*wc));
3278 						continue;
3279 					}
3280 					cqe->status = -1;
3281 				}
3282 				/* Errors need not be looped back.
3283 				 * But change the wr_id to the one
3284 				 * stored in the table
3285 				 */
3286 				tbl_idx = cqe->wr_id;
3287 				sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
3288 				wc->wr_id = sqp_entry->wrid;
3289 				bnxt_re_process_res_rawqp1_wc(wc, cqe);
3290 				break;
3291 			case CQ_BASE_CQE_TYPE_RES_RC:
3292 				bnxt_re_process_res_rc_wc(wc, cqe);
3293 				break;
3294 			case CQ_BASE_CQE_TYPE_RES_UD:
3295 				if (qp->qplib_qp.id ==
3296 				    qp->rdev->qp1_sqp->qplib_qp.id) {
3297 					/* Handle this completion with
3298 					 * the stored completion
3299 					 */
3300 					if (cqe->status) {
3301 						continue;
3302 					} else {
3303 						bnxt_re_process_res_shadow_qp_wc
3304 								(qp, wc, cqe);
3305 						break;
3306 					}
3307 				}
3308 				bnxt_re_process_res_ud_wc(wc, cqe);
3309 				break;
3310 			default:
3311 				dev_err(rdev_to_dev(cq->rdev),
3312 					"POLL CQ : type 0x%x not handled",
3313 					cqe->opcode);
3314 				continue;
3315 			}
3316 			wc++;
3317 			budget--;
3318 		}
3319 	}
3320 exit:
3321 	spin_unlock_irqrestore(&cq->cq_lock, flags);
3322 	return num_entries - budget;
3323 }
3324 
3325 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3326 			  enum ib_cq_notify_flags ib_cqn_flags)
3327 {
3328 	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3329 	int type = 0, rc = 0;
3330 	unsigned long flags;
3331 
3332 	spin_lock_irqsave(&cq->cq_lock, flags);
3333 	/* Trigger on the very next completion */
3334 	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3335 		type = DBR_DBR_TYPE_CQ_ARMALL;
3336 	/* Trigger on the next solicited completion */
3337 	else if (ib_cqn_flags & IB_CQ_SOLICITED)
3338 		type = DBR_DBR_TYPE_CQ_ARMSE;
3339 
3340 	/* Poll to see if there are missed events */
3341 	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3342 	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3343 		rc = 1;
3344 		goto exit;
3345 	}
3346 	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3347 
3348 exit:
3349 	spin_unlock_irqrestore(&cq->cq_lock, flags);
3350 	return rc;
3351 }
3352 
3353 /* Memory Regions */
3354 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3355 {
3356 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3357 	struct bnxt_re_dev *rdev = pd->rdev;
3358 	struct bnxt_re_mr *mr;
3359 	u64 pbl = 0;
3360 	int rc;
3361 
3362 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3363 	if (!mr)
3364 		return ERR_PTR(-ENOMEM);
3365 
3366 	mr->rdev = rdev;
3367 	mr->qplib_mr.pd = &pd->qplib_pd;
3368 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3369 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3370 
3371 	/* Allocate and register 0 as the address; the DMA MR spans all of memory */
3372 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3373 	if (rc)
3374 		goto fail;
3375 
3376 	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3377 	mr->qplib_mr.total_size = -1; /* Infinite length */
3378 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
3379 			       PAGE_SIZE);
3380 	if (rc)
3381 		goto fail_mr;
3382 
3383 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
3384 	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3385 			       IB_ACCESS_REMOTE_ATOMIC))
3386 		mr->ib_mr.rkey = mr->ib_mr.lkey;
3387 	atomic_inc(&rdev->mr_count);
3388 
3389 	return &mr->ib_mr;
3390 
3391 fail_mr:
3392 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3393 fail:
3394 	kfree(mr);
3395 	return ERR_PTR(rc);
3396 }
3397 
3398 int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3399 {
3400 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3401 	struct bnxt_re_dev *rdev = mr->rdev;
3402 	int rc;
3403 
3404 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3405 	if (rc)
3406 		dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3407 
3408 	if (mr->pages) {
3409 		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3410 							&mr->qplib_frpl);
3411 		kfree(mr->pages);
3412 		mr->npages = 0;
3413 		mr->pages = NULL;
3414 	}
3415 	if (!IS_ERR_OR_NULL(mr->ib_umem))
3416 		ib_umem_release(mr->ib_umem);
3417 
3418 	kfree(mr);
3419 	atomic_dec(&rdev->mr_count);
3420 	return rc;
3421 }
3422 
3423 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3424 {
3425 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3426 
3427 	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3428 		return -ENOMEM;
3429 
3430 	mr->pages[mr->npages++] = addr;
3431 	return 0;
3432 }
3433 
3434 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3435 		      unsigned int *sg_offset)
3436 {
3437 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3438 
3439 	mr->npages = 0;
3440 	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3441 }
3442 
3443 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3444 			       u32 max_num_sg)
3445 {
3446 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3447 	struct bnxt_re_dev *rdev = pd->rdev;
3448 	struct bnxt_re_mr *mr = NULL;
3449 	int rc;
3450 
3451 	if (type != IB_MR_TYPE_MEM_REG) {
3452 		dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3453 		return ERR_PTR(-EINVAL);
3454 	}
3455 	if (max_num_sg > MAX_PBL_LVL_1_PGS)
3456 		return ERR_PTR(-EINVAL);
3457 
3458 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3459 	if (!mr)
3460 		return ERR_PTR(-ENOMEM);
3461 
3462 	mr->rdev = rdev;
3463 	mr->qplib_mr.pd = &pd->qplib_pd;
3464 	mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3465 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3466 
3467 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3468 	if (rc)
3469 		goto bail;
3470 
3471 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
3472 	mr->ib_mr.rkey = mr->ib_mr.lkey;
3473 
3474 	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3475 	if (!mr->pages) {
3476 		rc = -ENOMEM;
3477 		goto fail;
3478 	}
3479 	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3480 						 &mr->qplib_frpl, max_num_sg);
3481 	if (rc) {
3482 		dev_err(rdev_to_dev(rdev),
3483 			"Failed to allocate HW FR page list");
3484 		goto fail_mr;
3485 	}
3486 
3487 	atomic_inc(&rdev->mr_count);
3488 	return &mr->ib_mr;
3489 
3490 fail_mr:
3491 	kfree(mr->pages);
3492 fail:
3493 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3494 bail:
3495 	kfree(mr);
3496 	return ERR_PTR(rc);
3497 }
3498 
3499 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3500 			       struct ib_udata *udata)
3501 {
3502 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3503 	struct bnxt_re_dev *rdev = pd->rdev;
3504 	struct bnxt_re_mw *mw;
3505 	int rc;
3506 
3507 	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3508 	if (!mw)
3509 		return ERR_PTR(-ENOMEM);
3510 	mw->rdev = rdev;
3511 	mw->qplib_mw.pd = &pd->qplib_pd;
3512 
3513 	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3514 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3515 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3516 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3517 	if (rc) {
3518 		dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3519 		goto fail;
3520 	}
3521 	mw->ib_mw.rkey = mw->qplib_mw.rkey;
3522 
3523 	atomic_inc(&rdev->mw_count);
3524 	return &mw->ib_mw;
3525 
3526 fail:
3527 	kfree(mw);
3528 	return ERR_PTR(rc);
3529 }
3530 
3531 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3532 {
3533 	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3534 	struct bnxt_re_dev *rdev = mw->rdev;
3535 	int rc;
3536 
3537 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3538 	if (rc) {
3539 		dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3540 		return rc;
3541 	}
3542 
3543 	kfree(mw);
3544 	atomic_dec(&rdev->mw_count);
3545 	return rc;
3546 }
3547 
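/*
 * The CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_* constants encode log2 of the
 * page size (e.g. 12 for 4K), so a umem page_shift can be matched against
 * them directly.
 */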
3548 static int bnxt_re_page_size_ok(int page_shift)
3549 {
3550 	switch (page_shift) {
3551 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
3552 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
3553 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
3554 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
3555 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
3556 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
3557 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
3558 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
3559 		return 1;
3560 	default:
3561 		return 0;
3562 	}
3563 }
3564 
3565 static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
3566 			     int page_shift)
3567 {
3568 	u64 *pbl_tbl = pbl_tbl_orig;
3569 	u64 paddr;
3570 	u64 page_mask = (1ULL << page_shift) - 1;
3571 	int i, pages;
3572 	struct scatterlist *sg;
3573 	int entry;
3574 
3575 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
3576 		pages = sg_dma_len(sg) >> PAGE_SHIFT;
3577 		for (i = 0; i < pages; i++) {
3578 			paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
3579 			if (pbl_tbl == pbl_tbl_orig)
3580 				*pbl_tbl++ = paddr & ~page_mask;
3581 			else if ((paddr & page_mask) == 0)
3582 				*pbl_tbl++ = paddr;
3583 		}
3584 	}
3585 	return pbl_tbl - pbl_tbl_orig;
3586 }
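/*
 * Worked example: with page_shift = 21 (2M) and PAGE_SHIFT = 12 (4K), the
 * umem is walked in 4K steps but a PBL entry is emitted only for addresses
 * on a 2M boundary, so a 6M region yields three entries. The first entry
 * is always emitted with the low bits masked off, so a start address in
 * the middle of a huge page still records that page's base.
 */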
3587 
3588 /* uverbs */
3589 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3590 				  u64 virt_addr, int mr_access_flags,
3591 				  struct ib_udata *udata)
3592 {
3593 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3594 	struct bnxt_re_dev *rdev = pd->rdev;
3595 	struct bnxt_re_mr *mr;
3596 	struct ib_umem *umem;
3597 	u64 *pbl_tbl = NULL;
3598 	int umem_pgs, page_shift, rc;
3599 
3600 	if (length > BNXT_RE_MAX_MR_SIZE) {
3601 		dev_err(rdev_to_dev(rdev), "MR size %llu > max supported %llu\n",
3602 			length, (u64)BNXT_RE_MAX_MR_SIZE);
3603 		return ERR_PTR(-ENOMEM);
3604 	}
3605 
3606 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3607 	if (!mr)
3608 		return ERR_PTR(-ENOMEM);
3609 
3610 	mr->rdev = rdev;
3611 	mr->qplib_mr.pd = &pd->qplib_pd;
3612 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3613 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3614 
3615 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3616 	if (rc) {
3617 		dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3618 		goto free_mr;
3619 	}
3620 	/* The fixed portion of the rkey is the same as the lkey */
3621 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
3622 
3623 	umem = ib_umem_get(ib_pd->uobject->context, start, length,
3624 			   mr_access_flags, 0);
3625 	if (IS_ERR(umem)) {
3626 		dev_err(rdev_to_dev(rdev), "Failed to get umem");
3627 		rc = -EFAULT;
3628 		goto free_mrw;
3629 	}
3630 	mr->ib_umem = umem;
3631 
3632 	mr->qplib_mr.va = virt_addr;
3633 	umem_pgs = ib_umem_page_count(umem);
3634 	if (!umem_pgs) {
3635 		dev_err(rdev_to_dev(rdev), "umem is invalid!");
3636 		rc = -EINVAL;
3637 		goto free_umem;
3638 	}
3639 	mr->qplib_mr.total_size = length;
3640 
3641 	pbl_tbl = kcalloc(umem_pgs, sizeof(u64), GFP_KERNEL);
3642 	if (!pbl_tbl) {
3643 		rc = -ENOMEM;
3644 		goto free_umem;
3645 	}
3646 
3647 	page_shift = umem->page_shift;
3648 
3649 	if (!bnxt_re_page_size_ok(page_shift)) {
3650 		dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
3651 		rc = -EFAULT;
3652 		goto fail;
3653 	}
3654 
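	/*
	 * Without hugetlb backing the MR is described with PAGE_SIZE PBL
	 * entries, so its size is capped at BNXT_RE_MAX_MR_SIZE_LOW;
	 * hugetlb umems larger than 2M are registered with a 2M page size
	 * instead.
	 */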
3655 	if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
3656 		dev_err(rdev_to_dev(rdev), "Requested MR size %llu > max supported %llu",
3657 			length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
3658 		rc = -EINVAL;
3659 		goto fail;
3660 	}
3661 	if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
3662 		page_shift = BNXT_RE_PAGE_SHIFT_2M;
3663 		dev_warn(rdev_to_dev(rdev), "umem is hugetlb backed, using page size 0x%x",
3664 			 1 << page_shift);
3665 	}
3666 
3667 	/* Map umem buf ptrs to the PBL */
3668 	umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
3669 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
3670 			       umem_pgs, false, 1 << page_shift);
3671 	if (rc) {
3672 		dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3673 		goto fail;
3674 	}
3675 
3676 	kfree(pbl_tbl);
3677 
3678 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
3679 	mr->ib_mr.rkey = mr->qplib_mr.lkey;
3680 	atomic_inc(&rdev->mr_count);
3681 
3682 	return &mr->ib_mr;
3683 fail:
3684 	kfree(pbl_tbl);
3685 free_umem:
3686 	ib_umem_release(umem);
3687 free_mrw:
3688 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3689 free_mr:
3690 	kfree(mr);
3691 	return ERR_PTR(rc);
3692 }
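/*
 * Illustrative userspace counterpart (libibverbs, not part of this file),
 * which reaches this handler through the REG_MR uverbs command:
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_READ);
 *
 * On failure ibv_reg_mr() returns NULL with errno set from the rc
 * returned above.
 */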
3693 
3694 struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3695 					   struct ib_udata *udata)
3696 {
3697 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3698 	struct bnxt_re_uctx_resp resp;
3699 	struct bnxt_re_ucontext *uctx;
3700 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3701 	int rc;
3702 
3703 	dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3704 		ibdev->uverbs_abi_ver);
3705 
3706 	if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3707 		dev_dbg(rdev_to_dev(rdev), "ABI version %d is different from the device's %d",
3708 			ibdev->uverbs_abi_ver, BNXT_RE_ABI_VERSION);
3709 		return ERR_PTR(-EPERM);
3710 	}
3711 
3712 	uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3713 	if (!uctx)
3714 		return ERR_PTR(-ENOMEM);
3715 
3716 	uctx->rdev = rdev;
3717 
3718 	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3719 	if (!uctx->shpg) {
3720 		rc = -ENOMEM;
3721 		goto fail;
3722 	}
3723 	spin_lock_init(&uctx->sh_lock);
3724 
3725 	resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp; use idr_alloc() instead */
3726 	resp.max_qp = rdev->qplib_ctx.qpc_count;
3727 	resp.pg_size = PAGE_SIZE;
3728 	resp.cqe_sz = sizeof(struct cq_base);
3729 	resp.max_cqd = dev_attr->max_cq_wqes;
3730 	resp.rsvd    = 0;
3731 
3732 	rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3733 	if (rc) {
3734 		dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3735 		rc = -EFAULT;
3736 		goto cfail;
3737 	}
3738 
3739 	return &uctx->ib_uctx;
3740 cfail:
3741 	free_page((unsigned long)uctx->shpg);
3742 	uctx->shpg = NULL;
3743 fail:
3744 	kfree(uctx);
3745 	return ERR_PTR(rc);
3746 }
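/*
 * The response filled in above must stay in sync with struct
 * bnxt_re_uctx_resp in rdma/bnxt_re-abi.h; the userspace provider sizes
 * its CQE parsing and page mappings from these fields.
 */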
3747 
3748 int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3749 {
3750 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3751 						   struct bnxt_re_ucontext,
3752 						   ib_uctx);
3753 
3754 	struct bnxt_re_dev *rdev = uctx->rdev;
3755 	int rc = 0;
3756 
3757 	if (uctx->shpg)
3758 		free_page((unsigned long)uctx->shpg);
3759 
3760 	if (uctx->dpi.dbr) {
3761 		/* Free DPI only if this is the first PD allocated by the
3762 		 * application and mark the context dpi as NULL
3763 		 */
3764 		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3765 					    &rdev->qplib_res.dpi_tbl,
3766 					    &uctx->dpi);
3767 		if (rc)
3768 			dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
3769 		/* Don't fail, continue */
3770 		uctx->dpi.dbr = NULL;
3771 	}
3772 
3773 	kfree(uctx);
3774 	return 0;
3775 }
3776 
3777 /* Helper function to mmap the virtual memory from user app */
3778 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3779 {
3780 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3781 						   struct bnxt_re_ucontext,
3782 						   ib_uctx);
3783 	struct bnxt_re_dev *rdev = uctx->rdev;
3784 	u64 pfn;
3785 
3786 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3787 		return -EINVAL;
3788 
3789 	if (vma->vm_pgoff) {
3790 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3791 		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3792 				       PAGE_SIZE, vma->vm_page_prot)) {
3793 			dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3794 			return -EAGAIN;
3795 		}
3796 	} else {
3797 		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3798 		if (remap_pfn_range(vma, vma->vm_start,
3799 				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
3800 			dev_err(rdev_to_dev(rdev),
3801 				"Failed to map shared page");
3802 			return -EAGAIN;
3803 		}
3804 	}
3805 
3806 	return 0;
3807 }
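/*
 * Illustrative sketch of the userspace side (assumed provider behaviour,
 * not part of this file): an offset of 0 maps the shared page allocated
 * in bnxt_re_alloc_ucontext(), while a non-zero offset is interpreted as
 * the pfn of the doorbell (DPI) page:
 *
 *	shpg = mmap(NULL, pg_size, PROT_READ, MAP_SHARED, cmd_fd, 0);
 *	dbr  = mmap(NULL, pg_size, PROT_WRITE, MAP_SHARED, cmd_fd,
 *		    dpi_pfn * pg_size);
 *
 * where cmd_fd is the uverbs command FD and pg_size is the value
 * reported in bnxt_re_uctx_resp.pg_size.
 */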
3808