1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: IB Verbs interpreter
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
44 
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/ib_addr.h>
49 #include <rdma/ib_mad.h>
50 #include <rdma/ib_cache.h>
51 #include <rdma/uverbs_ioctl.h>
52 
53 #include "bnxt_ulp.h"
54 
55 #include "roce_hsi.h"
56 #include "qplib_res.h"
57 #include "qplib_sp.h"
58 #include "qplib_fp.h"
59 #include "qplib_rcfw.h"
60 
61 #include "bnxt_re.h"
62 #include "ib_verbs.h"
63 #include <rdma/bnxt_re-abi.h>
64 
65 static int __from_ib_access_flags(int iflags)
66 {
67 	int qflags = 0;
68 
69 	if (iflags & IB_ACCESS_LOCAL_WRITE)
70 		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
71 	if (iflags & IB_ACCESS_REMOTE_READ)
72 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
73 	if (iflags & IB_ACCESS_REMOTE_WRITE)
74 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
75 	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
76 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
77 	if (iflags & IB_ACCESS_MW_BIND)
78 		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
79 	if (iflags & IB_ZERO_BASED)
80 		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
81 	if (iflags & IB_ACCESS_ON_DEMAND)
82 		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
83 	return qflags;
84 };
85 
86 static enum ib_access_flags __to_ib_access_flags(int qflags)
87 {
88 	enum ib_access_flags iflags = 0;
89 
90 	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
91 		iflags |= IB_ACCESS_LOCAL_WRITE;
92 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
93 		iflags |= IB_ACCESS_REMOTE_WRITE;
94 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
95 		iflags |= IB_ACCESS_REMOTE_READ;
96 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
97 		iflags |= IB_ACCESS_REMOTE_ATOMIC;
98 	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
99 		iflags |= IB_ACCESS_MW_BIND;
100 	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
101 		iflags |= IB_ZERO_BASED;
102 	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
103 		iflags |= IB_ACCESS_ON_DEMAND;
104 	return iflags;
105 };
106 
107 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
108 			     struct bnxt_qplib_sge *sg_list, int num)
109 {
110 	int i, total = 0;
111 
112 	for (i = 0; i < num; i++) {
113 		sg_list[i].addr = ib_sg_list[i].addr;
114 		sg_list[i].lkey = ib_sg_list[i].lkey;
115 		sg_list[i].size = ib_sg_list[i].length;
116 		total += sg_list[i].size;
117 	}
118 	return total;
119 }
120 
121 /* Device */
122 struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
123 {
124 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
125 	struct net_device *netdev = NULL;
126 
127 	rcu_read_lock();
128 	if (rdev)
129 		netdev = rdev->netdev;
130 	if (netdev)
131 		dev_hold(netdev);
132 
133 	rcu_read_unlock();
134 	return netdev;
135 }
136 
137 int bnxt_re_query_device(struct ib_device *ibdev,
138 			 struct ib_device_attr *ib_attr,
139 			 struct ib_udata *udata)
140 {
141 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
142 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
143 
144 	memset(ib_attr, 0, sizeof(*ib_attr));
145 	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
146 	       min(sizeof(dev_attr->fw_ver),
147 		   sizeof(ib_attr->fw_ver)));
148 	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
149 			    (u8 *)&ib_attr->sys_image_guid);
150 	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
151 	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
152 
153 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
154 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
155 	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
156 	ib_attr->max_qp = dev_attr->max_qp;
157 	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
158 	ib_attr->device_cap_flags =
159 				    IB_DEVICE_CURR_QP_STATE_MOD
160 				    | IB_DEVICE_RC_RNR_NAK_GEN
161 				    | IB_DEVICE_SHUTDOWN_PORT
162 				    | IB_DEVICE_SYS_IMAGE_GUID
163 				    | IB_DEVICE_LOCAL_DMA_LKEY
164 				    | IB_DEVICE_RESIZE_MAX_WR
165 				    | IB_DEVICE_PORT_ACTIVE_EVENT
166 				    | IB_DEVICE_N_NOTIFY_CQ
167 				    | IB_DEVICE_MEM_WINDOW
168 				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
169 				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
170 	ib_attr->max_send_sge = dev_attr->max_qp_sges;
171 	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
172 	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
173 	ib_attr->max_cq = dev_attr->max_cq;
174 	ib_attr->max_cqe = dev_attr->max_cq_wqes;
175 	ib_attr->max_mr = dev_attr->max_mr;
176 	ib_attr->max_pd = dev_attr->max_pd;
177 	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
178 	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
179 	ib_attr->atomic_cap = IB_ATOMIC_NONE;
180 	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
181 
182 	ib_attr->max_ee_rd_atom = 0;
183 	ib_attr->max_res_rd_atom = 0;
184 	ib_attr->max_ee_init_rd_atom = 0;
185 	ib_attr->max_ee = 0;
186 	ib_attr->max_rdd = 0;
187 	ib_attr->max_mw = dev_attr->max_mw;
188 	ib_attr->max_raw_ipv6_qp = 0;
189 	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
190 	ib_attr->max_mcast_grp = 0;
191 	ib_attr->max_mcast_qp_attach = 0;
192 	ib_attr->max_total_mcast_qp_attach = 0;
193 	ib_attr->max_ah = dev_attr->max_ah;
194 
195 	ib_attr->max_fmr = 0;
196 	ib_attr->max_map_per_fmr = 0;
197 
198 	ib_attr->max_srq = dev_attr->max_srq;
199 	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
200 	ib_attr->max_srq_sge = dev_attr->max_srq_sges;
201 
202 	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
203 
204 	ib_attr->max_pkeys = 1;
205 	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
206 	return 0;
207 }
208 
209 int bnxt_re_modify_device(struct ib_device *ibdev,
210 			  int device_modify_mask,
211 			  struct ib_device_modify *device_modify)
212 {
213 	switch (device_modify_mask) {
214 	case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
215 		/* Modify the GUID requires the modification of the GID table */
216 		/* GUID should be made as READ-ONLY */
217 		break;
218 	case IB_DEVICE_MODIFY_NODE_DESC:
219 		/* Node Desc should be made as READ-ONLY */
220 		break;
221 	default:
222 		break;
223 	}
224 	return 0;
225 }
226 
227 /* Port */
228 int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
229 		       struct ib_port_attr *port_attr)
230 {
231 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
232 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
233 
234 	memset(port_attr, 0, sizeof(*port_attr));
235 
236 	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
237 		port_attr->state = IB_PORT_ACTIVE;
238 		port_attr->phys_state = 5;
239 	} else {
240 		port_attr->state = IB_PORT_DOWN;
241 		port_attr->phys_state = 3;
242 	}
243 	port_attr->max_mtu = IB_MTU_4096;
244 	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
245 	port_attr->gid_tbl_len = dev_attr->max_sgid;
246 	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
247 				    IB_PORT_DEVICE_MGMT_SUP |
248 				    IB_PORT_VENDOR_CLASS_SUP;
249 	port_attr->ip_gids = true;
250 
251 	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
252 	port_attr->bad_pkey_cntr = 0;
253 	port_attr->qkey_viol_cntr = 0;
254 	port_attr->pkey_tbl_len = dev_attr->max_pkey;
255 	port_attr->lid = 0;
256 	port_attr->sm_lid = 0;
257 	port_attr->lmc = 0;
258 	port_attr->max_vl_num = 4;
259 	port_attr->sm_sl = 0;
260 	port_attr->subnet_timeout = 0;
261 	port_attr->init_type_reply = 0;
262 	port_attr->active_speed = rdev->active_speed;
263 	port_attr->active_width = rdev->active_width;
264 
265 	return 0;
266 }
267 
268 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
269 			       struct ib_port_immutable *immutable)
270 {
271 	struct ib_port_attr port_attr;
272 
273 	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
274 		return -EINVAL;
275 
276 	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
277 	immutable->gid_tbl_len = port_attr.gid_tbl_len;
278 	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
279 	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
280 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
281 	return 0;
282 }
283 
284 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
285 {
286 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
287 
288 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
289 		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
290 		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
291 }
292 
293 int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
294 		       u16 index, u16 *pkey)
295 {
296 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
297 
298 	/* Ignore port_num */
299 
300 	memset(pkey, 0, sizeof(*pkey));
301 	return bnxt_qplib_get_pkey(&rdev->qplib_res,
302 				   &rdev->qplib_res.pkey_tbl, index, pkey);
303 }
304 
305 int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
306 		      int index, union ib_gid *gid)
307 {
308 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
309 	int rc = 0;
310 
311 	/* Ignore port_num */
312 	memset(gid, 0, sizeof(*gid));
313 	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
314 				 &rdev->qplib_res.sgid_tbl, index,
315 				 (struct bnxt_qplib_gid *)gid);
316 	return rc;
317 }
318 
319 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
320 {
321 	int rc = 0;
322 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
323 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
324 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
325 	struct bnxt_qplib_gid *gid_to_del;
326 
327 	/* Delete the entry from the hardware */
328 	ctx = *context;
329 	if (!ctx)
330 		return -EINVAL;
331 
332 	if (sgid_tbl && sgid_tbl->active) {
333 		if (ctx->idx >= sgid_tbl->max)
334 			return -EINVAL;
335 		gid_to_del = &sgid_tbl->tbl[ctx->idx];
336 		/* DEL_GID is called in WQ context(netdevice_event_work_handler)
337 		 * or via the ib_unregister_device path. In the former case QP1
338 		 * may not be destroyed yet, in which case just return as FW
339 		 * needs that entry to be present and will fail it's deletion.
340 		 * We could get invoked again after QP1 is destroyed OR get an
341 		 * ADD_GID call with a different GID value for the same index
342 		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
343 		 */
344 		if (ctx->idx == 0 &&
345 		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
346 		    ctx->refcnt == 1 && rdev->qp1_sqp) {
347 			dev_dbg(rdev_to_dev(rdev),
348 				"Trying to delete GID0 while QP1 is alive\n");
349 			return -EFAULT;
350 		}
351 		ctx->refcnt--;
352 		if (!ctx->refcnt) {
353 			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
354 			if (rc) {
355 				dev_err(rdev_to_dev(rdev),
356 					"Failed to remove GID: %#x", rc);
357 			} else {
358 				ctx_tbl = sgid_tbl->ctx;
359 				ctx_tbl[ctx->idx] = NULL;
360 				kfree(ctx);
361 			}
362 		}
363 	} else {
364 		return -EINVAL;
365 	}
366 	return rc;
367 }
368 
369 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
370 {
371 	int rc;
372 	u32 tbl_idx = 0;
373 	u16 vlan_id = 0xFFFF;
374 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
375 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
376 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
377 
378 	if ((attr->ndev) && is_vlan_dev(attr->ndev))
379 		vlan_id = vlan_dev_vlan_id(attr->ndev);
380 
381 	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
382 				 rdev->qplib_res.netdev->dev_addr,
383 				 vlan_id, true, &tbl_idx);
384 	if (rc == -EALREADY) {
385 		ctx_tbl = sgid_tbl->ctx;
386 		ctx_tbl[tbl_idx]->refcnt++;
387 		*context = ctx_tbl[tbl_idx];
388 		return 0;
389 	}
390 
391 	if (rc < 0) {
392 		dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
393 		return rc;
394 	}
395 
396 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
397 	if (!ctx)
398 		return -ENOMEM;
399 	ctx_tbl = sgid_tbl->ctx;
400 	ctx->idx = tbl_idx;
401 	ctx->refcnt = 1;
402 	ctx_tbl[tbl_idx] = ctx;
403 	*context = ctx;
404 
405 	return rc;
406 }
407 
408 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
409 					    u8 port_num)
410 {
411 	return IB_LINK_LAYER_ETHERNET;
412 }
413 
414 #define	BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
415 
416 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
417 {
418 	struct bnxt_re_fence_data *fence = &pd->fence;
419 	struct ib_mr *ib_mr = &fence->mr->ib_mr;
420 	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
421 
422 	memset(wqe, 0, sizeof(*wqe));
423 	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
424 	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
425 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
426 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
427 	wqe->bind.zero_based = false;
428 	wqe->bind.parent_l_key = ib_mr->lkey;
429 	wqe->bind.va = (u64)(unsigned long)fence->va;
430 	wqe->bind.length = fence->size;
431 	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
432 	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
433 
434 	/* Save the initial rkey in fence structure for now;
435 	 * wqe->bind.r_key will be set at (re)bind time.
436 	 */
437 	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
438 }
439 
440 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
441 {
442 	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
443 					     qplib_qp);
444 	struct ib_pd *ib_pd = qp->ib_qp.pd;
445 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
446 	struct bnxt_re_fence_data *fence = &pd->fence;
447 	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
448 	struct bnxt_qplib_swqe wqe;
449 	int rc;
450 
451 	memcpy(&wqe, fence_wqe, sizeof(wqe));
452 	wqe.bind.r_key = fence->bind_rkey;
453 	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
454 
455 	dev_dbg(rdev_to_dev(qp->rdev),
456 		"Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
457 		wqe.bind.r_key, qp->qplib_qp.id, pd);
458 	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
459 	if (rc) {
460 		dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
461 		return rc;
462 	}
463 	bnxt_qplib_post_send_db(&qp->qplib_qp);
464 
465 	return rc;
466 }
467 
468 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
469 {
470 	struct bnxt_re_fence_data *fence = &pd->fence;
471 	struct bnxt_re_dev *rdev = pd->rdev;
472 	struct device *dev = &rdev->en_dev->pdev->dev;
473 	struct bnxt_re_mr *mr = fence->mr;
474 
475 	if (fence->mw) {
476 		bnxt_re_dealloc_mw(fence->mw);
477 		fence->mw = NULL;
478 	}
479 	if (mr) {
480 		if (mr->ib_mr.rkey)
481 			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
482 					     true);
483 		if (mr->ib_mr.lkey)
484 			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
485 		kfree(mr);
486 		fence->mr = NULL;
487 	}
488 	if (fence->dma_addr) {
489 		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
490 				 DMA_BIDIRECTIONAL);
491 		fence->dma_addr = 0;
492 	}
493 }
494 
495 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
496 {
497 	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
498 	struct bnxt_re_fence_data *fence = &pd->fence;
499 	struct bnxt_re_dev *rdev = pd->rdev;
500 	struct device *dev = &rdev->en_dev->pdev->dev;
501 	struct bnxt_re_mr *mr = NULL;
502 	dma_addr_t dma_addr = 0;
503 	struct ib_mw *mw;
504 	u64 pbl_tbl;
505 	int rc;
506 
507 	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
508 				  DMA_BIDIRECTIONAL);
509 	rc = dma_mapping_error(dev, dma_addr);
510 	if (rc) {
511 		dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
512 		rc = -EIO;
513 		fence->dma_addr = 0;
514 		goto fail;
515 	}
516 	fence->dma_addr = dma_addr;
517 
518 	/* Allocate a MR */
519 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
520 	if (!mr) {
521 		rc = -ENOMEM;
522 		goto fail;
523 	}
524 	fence->mr = mr;
525 	mr->rdev = rdev;
526 	mr->qplib_mr.pd = &pd->qplib_pd;
527 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
528 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
529 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
530 	if (rc) {
531 		dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
532 		goto fail;
533 	}
534 
535 	/* Register MR */
536 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
537 	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
538 	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
539 	pbl_tbl = dma_addr;
540 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
541 			       BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
542 	if (rc) {
543 		dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
544 		goto fail;
545 	}
546 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
547 
548 	/* Create a fence MW only for kernel consumers */
549 	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
550 	if (IS_ERR(mw)) {
551 		dev_err(rdev_to_dev(rdev),
552 			"Failed to create fence-MW for PD: %p\n", pd);
553 		rc = PTR_ERR(mw);
554 		goto fail;
555 	}
556 	fence->mw = mw;
557 
558 	bnxt_re_create_fence_wqe(pd);
559 	return 0;
560 
561 fail:
562 	bnxt_re_destroy_fence_mr(pd);
563 	return rc;
564 }
565 
566 /* Protection Domains */
567 void bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
568 {
569 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
570 	struct bnxt_re_dev *rdev = pd->rdev;
571 
572 	bnxt_re_destroy_fence_mr(pd);
573 
574 	if (pd->qplib_pd.id)
575 		bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
576 				      &pd->qplib_pd);
577 }
578 
579 int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *ucontext,
580 		     struct ib_udata *udata)
581 {
582 	struct ib_device *ibdev = ibpd->device;
583 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
584 	struct bnxt_re_ucontext *ucntx = container_of(ucontext,
585 						      struct bnxt_re_ucontext,
586 						      ib_uctx);
587 	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
588 	int rc;
589 
590 	pd->rdev = rdev;
591 	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
592 		dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
593 		rc = -ENOMEM;
594 		goto fail;
595 	}
596 
597 	if (udata) {
598 		struct bnxt_re_pd_resp resp;
599 
600 		if (!ucntx->dpi.dbr) {
601 			/* Allocate DPI in alloc_pd to avoid failing of
602 			 * ibv_devinfo and family of application when DPIs
603 			 * are depleted.
604 			 */
605 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
606 						 &ucntx->dpi, ucntx)) {
607 				rc = -ENOMEM;
608 				goto dbfail;
609 			}
610 		}
611 
612 		resp.pdid = pd->qplib_pd.id;
613 		/* Still allow mapping this DBR to the new user PD. */
614 		resp.dpi = ucntx->dpi.dpi;
615 		resp.dbr = (u64)ucntx->dpi.umdbr;
616 
617 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
618 		if (rc) {
619 			dev_err(rdev_to_dev(rdev),
620 				"Failed to copy user response\n");
621 			goto dbfail;
622 		}
623 	}
624 
625 	if (!udata)
626 		if (bnxt_re_create_fence_mr(pd))
627 			dev_warn(rdev_to_dev(rdev),
628 				 "Failed to create Fence-MR\n");
629 	return 0;
630 dbfail:
631 	bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
632 			      &pd->qplib_pd);
633 fail:
634 	return rc;
635 }
636 
637 /* Address Handles */
638 int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
639 {
640 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
641 	struct bnxt_re_dev *rdev = ah->rdev;
642 	int rc;
643 
644 	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
645 				   !(flags & RDMA_DESTROY_AH_SLEEPABLE));
646 	if (rc) {
647 		dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
648 		return rc;
649 	}
650 	kfree(ah);
651 	return 0;
652 }
653 
654 static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
655 {
656 	u8 nw_type;
657 
658 	switch (ntype) {
659 	case RDMA_NETWORK_IPV4:
660 		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
661 		break;
662 	case RDMA_NETWORK_IPV6:
663 		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
664 		break;
665 	default:
666 		nw_type = CMDQ_CREATE_AH_TYPE_V1;
667 		break;
668 	}
669 	return nw_type;
670 }
671 
672 struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
673 				struct rdma_ah_attr *ah_attr,
674 				u32 flags,
675 				struct ib_udata *udata)
676 {
677 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
678 	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
679 	struct bnxt_re_dev *rdev = pd->rdev;
680 	const struct ib_gid_attr *sgid_attr;
681 	struct bnxt_re_ah *ah;
682 	u8 nw_type;
683 	int rc;
684 
685 	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
686 		dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
687 		return ERR_PTR(-EINVAL);
688 	}
689 	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
690 	if (!ah)
691 		return ERR_PTR(-ENOMEM);
692 
693 	ah->rdev = rdev;
694 	ah->qplib_ah.pd = &pd->qplib_pd;
695 
696 	/* Supply the configuration for the HW */
697 	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
698 	       sizeof(union ib_gid));
699 	/*
700 	 * If RoCE V2 is enabled, stack will have two entries for
701 	 * each GID entry. Avoiding this duplicte entry in HW. Dividing
702 	 * the GID index by 2 for RoCE V2
703 	 */
704 	ah->qplib_ah.sgid_index = grh->sgid_index / 2;
705 	ah->qplib_ah.host_sgid_index = grh->sgid_index;
706 	ah->qplib_ah.traffic_class = grh->traffic_class;
707 	ah->qplib_ah.flow_label = grh->flow_label;
708 	ah->qplib_ah.hop_limit = grh->hop_limit;
709 	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
710 
711 	sgid_attr = grh->sgid_attr;
712 	/* Get network header type for this GID */
713 	nw_type = rdma_gid_attr_network_type(sgid_attr);
714 	ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
715 
716 	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
717 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
718 				  !(flags & RDMA_CREATE_AH_SLEEPABLE));
719 	if (rc) {
720 		dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
721 		goto fail;
722 	}
723 
724 	/* Write AVID to shared page. */
725 	if (udata) {
726 		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
727 			udata, struct bnxt_re_ucontext, ib_uctx);
728 		unsigned long flag;
729 		u32 *wrptr;
730 
731 		spin_lock_irqsave(&uctx->sh_lock, flag);
732 		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
733 		*wrptr = ah->qplib_ah.id;
734 		wmb(); /* make sure cache is updated. */
735 		spin_unlock_irqrestore(&uctx->sh_lock, flag);
736 	}
737 
738 	return &ah->ib_ah;
739 
740 fail:
741 	kfree(ah);
742 	return ERR_PTR(rc);
743 }
744 
745 int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
746 {
747 	return 0;
748 }
749 
750 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
751 {
752 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
753 
754 	ah_attr->type = ib_ah->type;
755 	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
756 	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
757 	rdma_ah_set_grh(ah_attr, NULL, 0,
758 			ah->qplib_ah.host_sgid_index,
759 			0, ah->qplib_ah.traffic_class);
760 	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
761 	rdma_ah_set_port_num(ah_attr, 1);
762 	rdma_ah_set_static_rate(ah_attr, 0);
763 	return 0;
764 }
765 
766 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
767 	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
768 {
769 	unsigned long flags;
770 
771 	spin_lock_irqsave(&qp->scq->cq_lock, flags);
772 	if (qp->rcq != qp->scq)
773 		spin_lock(&qp->rcq->cq_lock);
774 	else
775 		__acquire(&qp->rcq->cq_lock);
776 
777 	return flags;
778 }
779 
780 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
781 			unsigned long flags)
782 	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
783 {
784 	if (qp->rcq != qp->scq)
785 		spin_unlock(&qp->rcq->cq_lock);
786 	else
787 		__release(&qp->rcq->cq_lock);
788 	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
789 }
790 
791 /* Queue Pairs */
792 int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
793 {
794 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
795 	struct bnxt_re_dev *rdev = qp->rdev;
796 	unsigned int flags;
797 	int rc;
798 
799 	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
800 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
801 	if (rc) {
802 		dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
803 		return rc;
804 	}
805 
806 	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
807 		flags = bnxt_re_lock_cqs(qp);
808 		bnxt_qplib_clean_qp(&qp->qplib_qp);
809 		bnxt_re_unlock_cqs(qp, flags);
810 	}
811 
812 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
813 
814 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
815 		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
816 					   &rdev->sqp_ah->qplib_ah, false);
817 		if (rc) {
818 			dev_err(rdev_to_dev(rdev),
819 				"Failed to destroy HW AH for shadow QP");
820 			return rc;
821 		}
822 
823 		bnxt_qplib_clean_qp(&qp->qplib_qp);
824 		rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
825 					   &rdev->qp1_sqp->qplib_qp);
826 		if (rc) {
827 			dev_err(rdev_to_dev(rdev),
828 				"Failed to destroy Shadow QP");
829 			return rc;
830 		}
831 		bnxt_qplib_free_qp_res(&rdev->qplib_res,
832 				       &rdev->qp1_sqp->qplib_qp);
833 		mutex_lock(&rdev->qp_lock);
834 		list_del(&rdev->qp1_sqp->list);
835 		atomic_dec(&rdev->qp_count);
836 		mutex_unlock(&rdev->qp_lock);
837 
838 		kfree(rdev->sqp_ah);
839 		kfree(rdev->qp1_sqp);
840 		rdev->qp1_sqp = NULL;
841 		rdev->sqp_ah = NULL;
842 	}
843 
844 	if (!IS_ERR_OR_NULL(qp->rumem))
845 		ib_umem_release(qp->rumem);
846 	if (!IS_ERR_OR_NULL(qp->sumem))
847 		ib_umem_release(qp->sumem);
848 
849 	mutex_lock(&rdev->qp_lock);
850 	list_del(&qp->list);
851 	atomic_dec(&rdev->qp_count);
852 	mutex_unlock(&rdev->qp_lock);
853 	kfree(qp);
854 	return 0;
855 }
856 
857 static u8 __from_ib_qp_type(enum ib_qp_type type)
858 {
859 	switch (type) {
860 	case IB_QPT_GSI:
861 		return CMDQ_CREATE_QP1_TYPE_GSI;
862 	case IB_QPT_RC:
863 		return CMDQ_CREATE_QP_TYPE_RC;
864 	case IB_QPT_UD:
865 		return CMDQ_CREATE_QP_TYPE_UD;
866 	default:
867 		return IB_QPT_MAX;
868 	}
869 }
870 
871 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
872 				struct bnxt_re_qp *qp, struct ib_udata *udata)
873 {
874 	struct bnxt_re_qp_req ureq;
875 	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
876 	struct ib_umem *umem;
877 	int bytes = 0, psn_sz;
878 	struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
879 		udata, struct bnxt_re_ucontext, ib_uctx);
880 
881 	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
882 		return -EFAULT;
883 
884 	bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
885 	/* Consider mapping PSN search memory only for RC QPs. */
886 	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
887 		psn_sz = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
888 					sizeof(struct sq_psn_search_ext) :
889 					sizeof(struct sq_psn_search);
890 		bytes += (qplib_qp->sq.max_wqe * psn_sz);
891 	}
892 	bytes = PAGE_ALIGN(bytes);
893 	umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
894 	if (IS_ERR(umem))
895 		return PTR_ERR(umem);
896 
897 	qp->sumem = umem;
898 	qplib_qp->sq.sglist = umem->sg_head.sgl;
899 	qplib_qp->sq.nmap = umem->nmap;
900 	qplib_qp->qp_handle = ureq.qp_handle;
901 
902 	if (!qp->qplib_qp.srq) {
903 		bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
904 		bytes = PAGE_ALIGN(bytes);
905 		umem = ib_umem_get(udata, ureq.qprva, bytes,
906 				   IB_ACCESS_LOCAL_WRITE, 1);
907 		if (IS_ERR(umem))
908 			goto rqfail;
909 		qp->rumem = umem;
910 		qplib_qp->rq.sglist = umem->sg_head.sgl;
911 		qplib_qp->rq.nmap = umem->nmap;
912 	}
913 
914 	qplib_qp->dpi = &cntx->dpi;
915 	return 0;
916 rqfail:
917 	ib_umem_release(qp->sumem);
918 	qp->sumem = NULL;
919 	qplib_qp->sq.sglist = NULL;
920 	qplib_qp->sq.nmap = 0;
921 
922 	return PTR_ERR(umem);
923 }
924 
925 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
926 				(struct bnxt_re_pd *pd,
927 				 struct bnxt_qplib_res *qp1_res,
928 				 struct bnxt_qplib_qp *qp1_qp)
929 {
930 	struct bnxt_re_dev *rdev = pd->rdev;
931 	struct bnxt_re_ah *ah;
932 	union ib_gid sgid;
933 	int rc;
934 
935 	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
936 	if (!ah)
937 		return NULL;
938 
939 	ah->rdev = rdev;
940 	ah->qplib_ah.pd = &pd->qplib_pd;
941 
942 	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
943 	if (rc)
944 		goto fail;
945 
946 	/* supply the dgid data same as sgid */
947 	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
948 	       sizeof(union ib_gid));
949 	ah->qplib_ah.sgid_index = 0;
950 
951 	ah->qplib_ah.traffic_class = 0;
952 	ah->qplib_ah.flow_label = 0;
953 	ah->qplib_ah.hop_limit = 1;
954 	ah->qplib_ah.sl = 0;
955 	/* Have DMAC same as SMAC */
956 	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
957 
958 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
959 	if (rc) {
960 		dev_err(rdev_to_dev(rdev),
961 			"Failed to allocate HW AH for Shadow QP");
962 		goto fail;
963 	}
964 
965 	return ah;
966 
967 fail:
968 	kfree(ah);
969 	return NULL;
970 }
971 
972 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
973 				(struct bnxt_re_pd *pd,
974 				 struct bnxt_qplib_res *qp1_res,
975 				 struct bnxt_qplib_qp *qp1_qp)
976 {
977 	struct bnxt_re_dev *rdev = pd->rdev;
978 	struct bnxt_re_qp *qp;
979 	int rc;
980 
981 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
982 	if (!qp)
983 		return NULL;
984 
985 	qp->rdev = rdev;
986 
987 	/* Initialize the shadow QP structure from the QP1 values */
988 	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
989 
990 	qp->qplib_qp.pd = &pd->qplib_pd;
991 	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
992 	qp->qplib_qp.type = IB_QPT_UD;
993 
994 	qp->qplib_qp.max_inline_data = 0;
995 	qp->qplib_qp.sig_type = true;
996 
997 	/* Shadow QP SQ depth should be same as QP1 RQ depth */
998 	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
999 	qp->qplib_qp.sq.max_sge = 2;
1000 	/* Q full delta can be 1 since it is internal QP */
1001 	qp->qplib_qp.sq.q_full_delta = 1;
1002 
1003 	qp->qplib_qp.scq = qp1_qp->scq;
1004 	qp->qplib_qp.rcq = qp1_qp->rcq;
1005 
1006 	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1007 	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1008 	/* Q full delta can be 1 since it is internal QP */
1009 	qp->qplib_qp.rq.q_full_delta = 1;
1010 
1011 	qp->qplib_qp.mtu = qp1_qp->mtu;
1012 
1013 	qp->qplib_qp.sq_hdr_buf_size = 0;
1014 	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1015 	qp->qplib_qp.dpi = &rdev->dpi_privileged;
1016 
1017 	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1018 	if (rc)
1019 		goto fail;
1020 
1021 	rdev->sqp_id = qp->qplib_qp.id;
1022 
1023 	spin_lock_init(&qp->sq_lock);
1024 	INIT_LIST_HEAD(&qp->list);
1025 	mutex_lock(&rdev->qp_lock);
1026 	list_add_tail(&qp->list, &rdev->qp_list);
1027 	atomic_inc(&rdev->qp_count);
1028 	mutex_unlock(&rdev->qp_lock);
1029 	return qp;
1030 fail:
1031 	kfree(qp);
1032 	return NULL;
1033 }
1034 
1035 struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1036 				struct ib_qp_init_attr *qp_init_attr,
1037 				struct ib_udata *udata)
1038 {
1039 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1040 	struct bnxt_re_dev *rdev = pd->rdev;
1041 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1042 	struct bnxt_re_qp *qp;
1043 	struct bnxt_re_cq *cq;
1044 	struct bnxt_re_srq *srq;
1045 	int rc, entries;
1046 
1047 	if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1048 	    (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1049 	    (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1050 	    (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1051 	    (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1052 		return ERR_PTR(-EINVAL);
1053 
1054 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1055 	if (!qp)
1056 		return ERR_PTR(-ENOMEM);
1057 
1058 	qp->rdev = rdev;
1059 	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1060 	qp->qplib_qp.pd = &pd->qplib_pd;
1061 	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1062 	qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1063 
1064 	if (qp_init_attr->qp_type == IB_QPT_GSI &&
1065 	    bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
1066 		qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_GSI;
1067 	if (qp->qplib_qp.type == IB_QPT_MAX) {
1068 		dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1069 			qp->qplib_qp.type);
1070 		rc = -EINVAL;
1071 		goto fail;
1072 	}
1073 
1074 	qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1075 	qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1076 				  IB_SIGNAL_ALL_WR) ? true : false);
1077 
1078 	qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1079 	if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1080 		qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1081 
1082 	if (qp_init_attr->send_cq) {
1083 		cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1084 				  ib_cq);
1085 		if (!cq) {
1086 			dev_err(rdev_to_dev(rdev), "Send CQ not found");
1087 			rc = -EINVAL;
1088 			goto fail;
1089 		}
1090 		qp->qplib_qp.scq = &cq->qplib_cq;
1091 		qp->scq = cq;
1092 	}
1093 
1094 	if (qp_init_attr->recv_cq) {
1095 		cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1096 				  ib_cq);
1097 		if (!cq) {
1098 			dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1099 			rc = -EINVAL;
1100 			goto fail;
1101 		}
1102 		qp->qplib_qp.rcq = &cq->qplib_cq;
1103 		qp->rcq = cq;
1104 	}
1105 
1106 	if (qp_init_attr->srq) {
1107 		srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
1108 				   ib_srq);
1109 		if (!srq) {
1110 			dev_err(rdev_to_dev(rdev), "SRQ not found");
1111 			rc = -EINVAL;
1112 			goto fail;
1113 		}
1114 		qp->qplib_qp.srq = &srq->qplib_srq;
1115 		qp->qplib_qp.rq.max_wqe = 0;
1116 	} else {
1117 		/* Allocate 1 more than what's provided so posting max doesn't
1118 		 * mean empty
1119 		 */
1120 		entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1121 		qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1122 						dev_attr->max_qp_wqes + 1);
1123 
1124 		qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1125 						qp_init_attr->cap.max_recv_wr;
1126 
1127 		qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1128 		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1129 			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1130 	}
1131 
1132 	qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1133 
1134 	if (qp_init_attr->qp_type == IB_QPT_GSI &&
1135 	    !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) {
1136 		/* Allocate 1 more than what's provided */
1137 		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1138 		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1139 						dev_attr->max_qp_wqes + 1);
1140 		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1141 						qp_init_attr->cap.max_send_wr;
1142 		qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1143 		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1144 			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1145 		qp->qplib_qp.sq.max_sge++;
1146 		if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1147 			qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1148 
1149 		qp->qplib_qp.rq_hdr_buf_size =
1150 					BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1151 
1152 		qp->qplib_qp.sq_hdr_buf_size =
1153 					BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1154 		qp->qplib_qp.dpi = &rdev->dpi_privileged;
1155 		rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1156 		if (rc) {
1157 			dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1158 			goto fail;
1159 		}
1160 		/* Create a shadow QP to handle the QP1 traffic */
1161 		rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1162 							 &qp->qplib_qp);
1163 		if (!rdev->qp1_sqp) {
1164 			rc = -EINVAL;
1165 			dev_err(rdev_to_dev(rdev),
1166 				"Failed to create Shadow QP for QP1");
1167 			goto qp_destroy;
1168 		}
1169 		rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1170 							   &qp->qplib_qp);
1171 		if (!rdev->sqp_ah) {
1172 			bnxt_qplib_destroy_qp(&rdev->qplib_res,
1173 					      &rdev->qp1_sqp->qplib_qp);
1174 			rc = -EINVAL;
1175 			dev_err(rdev_to_dev(rdev),
1176 				"Failed to create AH entry for ShadowQP");
1177 			goto qp_destroy;
1178 		}
1179 
1180 	} else {
1181 		/* Allocate 128 + 1 more than what's provided */
1182 		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1183 					     BNXT_QPLIB_RESERVED_QP_WRS + 1);
1184 		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1185 						dev_attr->max_qp_wqes +
1186 						BNXT_QPLIB_RESERVED_QP_WRS + 1);
1187 		qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1188 
1189 		/*
1190 		 * Reserving one slot for Phantom WQE. Application can
1191 		 * post one extra entry in this case. But allowing this to avoid
1192 		 * unexpected Queue full condition
1193 		 */
1194 
1195 		qp->qplib_qp.sq.q_full_delta -= 1;
1196 
1197 		qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1198 		qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1199 		if (udata) {
1200 			rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1201 			if (rc)
1202 				goto fail;
1203 		} else {
1204 			qp->qplib_qp.dpi = &rdev->dpi_privileged;
1205 		}
1206 
1207 		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1208 		if (rc) {
1209 			dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1210 			goto free_umem;
1211 		}
1212 	}
1213 
1214 	qp->ib_qp.qp_num = qp->qplib_qp.id;
1215 	spin_lock_init(&qp->sq_lock);
1216 	spin_lock_init(&qp->rq_lock);
1217 
1218 	if (udata) {
1219 		struct bnxt_re_qp_resp resp;
1220 
1221 		resp.qpid = qp->ib_qp.qp_num;
1222 		resp.rsvd = 0;
1223 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1224 		if (rc) {
1225 			dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1226 			goto qp_destroy;
1227 		}
1228 	}
1229 	INIT_LIST_HEAD(&qp->list);
1230 	mutex_lock(&rdev->qp_lock);
1231 	list_add_tail(&qp->list, &rdev->qp_list);
1232 	atomic_inc(&rdev->qp_count);
1233 	mutex_unlock(&rdev->qp_lock);
1234 
1235 	return &qp->ib_qp;
1236 qp_destroy:
1237 	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1238 free_umem:
1239 	if (udata) {
1240 		if (qp->rumem)
1241 			ib_umem_release(qp->rumem);
1242 		if (qp->sumem)
1243 			ib_umem_release(qp->sumem);
1244 	}
1245 fail:
1246 	kfree(qp);
1247 	return ERR_PTR(rc);
1248 }
1249 
1250 static u8 __from_ib_qp_state(enum ib_qp_state state)
1251 {
1252 	switch (state) {
1253 	case IB_QPS_RESET:
1254 		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1255 	case IB_QPS_INIT:
1256 		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1257 	case IB_QPS_RTR:
1258 		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1259 	case IB_QPS_RTS:
1260 		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1261 	case IB_QPS_SQD:
1262 		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1263 	case IB_QPS_SQE:
1264 		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1265 	case IB_QPS_ERR:
1266 	default:
1267 		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1268 	}
1269 }
1270 
1271 static enum ib_qp_state __to_ib_qp_state(u8 state)
1272 {
1273 	switch (state) {
1274 	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1275 		return IB_QPS_RESET;
1276 	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1277 		return IB_QPS_INIT;
1278 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1279 		return IB_QPS_RTR;
1280 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1281 		return IB_QPS_RTS;
1282 	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1283 		return IB_QPS_SQD;
1284 	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1285 		return IB_QPS_SQE;
1286 	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1287 	default:
1288 		return IB_QPS_ERR;
1289 	}
1290 }
1291 
1292 static u32 __from_ib_mtu(enum ib_mtu mtu)
1293 {
1294 	switch (mtu) {
1295 	case IB_MTU_256:
1296 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1297 	case IB_MTU_512:
1298 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1299 	case IB_MTU_1024:
1300 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1301 	case IB_MTU_2048:
1302 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1303 	case IB_MTU_4096:
1304 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1305 	default:
1306 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1307 	}
1308 }
1309 
1310 static enum ib_mtu __to_ib_mtu(u32 mtu)
1311 {
1312 	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1313 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1314 		return IB_MTU_256;
1315 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1316 		return IB_MTU_512;
1317 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1318 		return IB_MTU_1024;
1319 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1320 		return IB_MTU_2048;
1321 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1322 		return IB_MTU_4096;
1323 	default:
1324 		return IB_MTU_2048;
1325 	}
1326 }
1327 
1328 /* Shared Receive Queues */
1329 int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
1330 {
1331 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1332 					       ib_srq);
1333 	struct bnxt_re_dev *rdev = srq->rdev;
1334 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1335 	struct bnxt_qplib_nq *nq = NULL;
1336 	int rc;
1337 
1338 	if (qplib_srq->cq)
1339 		nq = qplib_srq->cq->nq;
1340 	rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1341 	if (rc) {
1342 		dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
1343 		return rc;
1344 	}
1345 
1346 	if (srq->umem)
1347 		ib_umem_release(srq->umem);
1348 	kfree(srq);
1349 	atomic_dec(&rdev->srq_count);
1350 	if (nq)
1351 		nq->budget--;
1352 	return 0;
1353 }
1354 
1355 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1356 				 struct bnxt_re_pd *pd,
1357 				 struct bnxt_re_srq *srq,
1358 				 struct ib_udata *udata)
1359 {
1360 	struct bnxt_re_srq_req ureq;
1361 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1362 	struct ib_umem *umem;
1363 	int bytes = 0;
1364 	struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1365 		udata, struct bnxt_re_ucontext, ib_uctx);
1366 
1367 	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1368 		return -EFAULT;
1369 
1370 	bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1371 	bytes = PAGE_ALIGN(bytes);
1372 	umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
1373 	if (IS_ERR(umem))
1374 		return PTR_ERR(umem);
1375 
1376 	srq->umem = umem;
1377 	qplib_srq->nmap = umem->nmap;
1378 	qplib_srq->sglist = umem->sg_head.sgl;
1379 	qplib_srq->srq_handle = ureq.srq_handle;
1380 	qplib_srq->dpi = &cntx->dpi;
1381 
1382 	return 0;
1383 }
1384 
1385 struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
1386 				  struct ib_srq_init_attr *srq_init_attr,
1387 				  struct ib_udata *udata)
1388 {
1389 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1390 	struct bnxt_re_dev *rdev = pd->rdev;
1391 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1392 	struct bnxt_re_srq *srq;
1393 	struct bnxt_qplib_nq *nq = NULL;
1394 	int rc, entries;
1395 
1396 	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1397 		dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded");
1398 		rc = -EINVAL;
1399 		goto exit;
1400 	}
1401 
1402 	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1403 		rc = -EOPNOTSUPP;
1404 		goto exit;
1405 	}
1406 
1407 	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1408 	if (!srq) {
1409 		rc = -ENOMEM;
1410 		goto exit;
1411 	}
1412 	srq->rdev = rdev;
1413 	srq->qplib_srq.pd = &pd->qplib_pd;
1414 	srq->qplib_srq.dpi = &rdev->dpi_privileged;
1415 	/* Allocate 1 more than what's provided so posting max doesn't
1416 	 * mean empty
1417 	 */
1418 	entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1419 	if (entries > dev_attr->max_srq_wqes + 1)
1420 		entries = dev_attr->max_srq_wqes + 1;
1421 
1422 	srq->qplib_srq.max_wqe = entries;
1423 	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1424 	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1425 	srq->srq_limit = srq_init_attr->attr.srq_limit;
1426 	srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1427 	nq = &rdev->nq[0];
1428 
1429 	if (udata) {
1430 		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1431 		if (rc)
1432 			goto fail;
1433 	}
1434 
1435 	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1436 	if (rc) {
1437 		dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
1438 		goto fail;
1439 	}
1440 
1441 	if (udata) {
1442 		struct bnxt_re_srq_resp resp;
1443 
1444 		resp.srqid = srq->qplib_srq.id;
1445 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1446 		if (rc) {
1447 			dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
1448 			bnxt_qplib_destroy_srq(&rdev->qplib_res,
1449 					       &srq->qplib_srq);
1450 			goto exit;
1451 		}
1452 	}
1453 	if (nq)
1454 		nq->budget++;
1455 	atomic_inc(&rdev->srq_count);
1456 
1457 	return &srq->ib_srq;
1458 
1459 fail:
1460 	if (srq->umem)
1461 		ib_umem_release(srq->umem);
1462 	kfree(srq);
1463 exit:
1464 	return ERR_PTR(rc);
1465 }
1466 
1467 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1468 		       enum ib_srq_attr_mask srq_attr_mask,
1469 		       struct ib_udata *udata)
1470 {
1471 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1472 					       ib_srq);
1473 	struct bnxt_re_dev *rdev = srq->rdev;
1474 	int rc;
1475 
1476 	switch (srq_attr_mask) {
1477 	case IB_SRQ_MAX_WR:
1478 		/* SRQ resize is not supported */
1479 		break;
1480 	case IB_SRQ_LIMIT:
1481 		/* Change the SRQ threshold */
1482 		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1483 			return -EINVAL;
1484 
1485 		srq->qplib_srq.threshold = srq_attr->srq_limit;
1486 		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1487 		if (rc) {
1488 			dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
1489 			return rc;
1490 		}
1491 		/* On success, update the shadow */
1492 		srq->srq_limit = srq_attr->srq_limit;
1493 		/* No need to Build and send response back to udata */
1494 		break;
1495 	default:
1496 		dev_err(rdev_to_dev(rdev),
1497 			"Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1498 		return -EINVAL;
1499 	}
1500 	return 0;
1501 }
1502 
1503 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1504 {
1505 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1506 					       ib_srq);
1507 	struct bnxt_re_srq tsrq;
1508 	struct bnxt_re_dev *rdev = srq->rdev;
1509 	int rc;
1510 
1511 	/* Get live SRQ attr */
1512 	tsrq.qplib_srq.id = srq->qplib_srq.id;
1513 	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1514 	if (rc) {
1515 		dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
1516 		return rc;
1517 	}
1518 	srq_attr->max_wr = srq->qplib_srq.max_wqe;
1519 	srq_attr->max_sge = srq->qplib_srq.max_sge;
1520 	srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1521 
1522 	return 0;
1523 }
1524 
1525 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1526 			  const struct ib_recv_wr **bad_wr)
1527 {
1528 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1529 					       ib_srq);
1530 	struct bnxt_qplib_swqe wqe;
1531 	unsigned long flags;
1532 	int rc = 0;
1533 
1534 	spin_lock_irqsave(&srq->lock, flags);
1535 	while (wr) {
1536 		/* Transcribe each ib_recv_wr to qplib_swqe */
1537 		wqe.num_sge = wr->num_sge;
1538 		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1539 		wqe.wr_id = wr->wr_id;
1540 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1541 
1542 		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1543 		if (rc) {
1544 			*bad_wr = wr;
1545 			break;
1546 		}
1547 		wr = wr->next;
1548 	}
1549 	spin_unlock_irqrestore(&srq->lock, flags);
1550 
1551 	return rc;
1552 }
1553 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1554 				    struct bnxt_re_qp *qp1_qp,
1555 				    int qp_attr_mask)
1556 {
1557 	struct bnxt_re_qp *qp = rdev->qp1_sqp;
1558 	int rc = 0;
1559 
1560 	if (qp_attr_mask & IB_QP_STATE) {
1561 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1562 		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1563 	}
1564 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1565 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1566 		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1567 	}
1568 
1569 	if (qp_attr_mask & IB_QP_QKEY) {
1570 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1571 		/* Using a Random  QKEY */
1572 		qp->qplib_qp.qkey = 0x81818181;
1573 	}
1574 	if (qp_attr_mask & IB_QP_SQ_PSN) {
1575 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1576 		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1577 	}
1578 
1579 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1580 	if (rc)
1581 		dev_err(rdev_to_dev(rdev),
1582 			"Failed to modify Shadow QP for QP1");
1583 	return rc;
1584 }
1585 
1586 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1587 		      int qp_attr_mask, struct ib_udata *udata)
1588 {
1589 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1590 	struct bnxt_re_dev *rdev = qp->rdev;
1591 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1592 	enum ib_qp_state curr_qp_state, new_qp_state;
1593 	int rc, entries;
1594 	unsigned int flags;
1595 	u8 nw_type;
1596 
1597 	qp->qplib_qp.modify_flags = 0;
1598 	if (qp_attr_mask & IB_QP_STATE) {
1599 		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1600 		new_qp_state = qp_attr->qp_state;
1601 		if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1602 					ib_qp->qp_type, qp_attr_mask)) {
1603 			dev_err(rdev_to_dev(rdev),
1604 				"Invalid attribute mask: %#x specified ",
1605 				qp_attr_mask);
1606 			dev_err(rdev_to_dev(rdev),
1607 				"for qpn: %#x type: %#x",
1608 				ib_qp->qp_num, ib_qp->qp_type);
1609 			dev_err(rdev_to_dev(rdev),
1610 				"curr_qp_state=0x%x, new_qp_state=0x%x\n",
1611 				curr_qp_state, new_qp_state);
1612 			return -EINVAL;
1613 		}
1614 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1615 		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1616 
1617 		if (!qp->sumem &&
1618 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1619 			dev_dbg(rdev_to_dev(rdev),
1620 				"Move QP = %p to flush list\n",
1621 				qp);
1622 			flags = bnxt_re_lock_cqs(qp);
1623 			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1624 			bnxt_re_unlock_cqs(qp, flags);
1625 		}
1626 		if (!qp->sumem &&
1627 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1628 			dev_dbg(rdev_to_dev(rdev),
1629 				"Move QP = %p out of flush list\n",
1630 				qp);
1631 			flags = bnxt_re_lock_cqs(qp);
1632 			bnxt_qplib_clean_qp(&qp->qplib_qp);
1633 			bnxt_re_unlock_cqs(qp, flags);
1634 		}
1635 	}
1636 	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1637 		qp->qplib_qp.modify_flags |=
1638 				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1639 		qp->qplib_qp.en_sqd_async_notify = true;
1640 	}
1641 	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1642 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1643 		qp->qplib_qp.access =
1644 			__from_ib_access_flags(qp_attr->qp_access_flags);
1645 		/* LOCAL_WRITE access must be set to allow RC receive */
1646 		qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1647 		/* Temp: Set all params on QP as of now */
1648 		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
1649 		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
1650 	}
1651 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1652 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1653 		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1654 	}
1655 	if (qp_attr_mask & IB_QP_QKEY) {
1656 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1657 		qp->qplib_qp.qkey = qp_attr->qkey;
1658 	}
1659 	if (qp_attr_mask & IB_QP_AV) {
1660 		const struct ib_global_route *grh =
1661 			rdma_ah_read_grh(&qp_attr->ah_attr);
1662 		const struct ib_gid_attr *sgid_attr;
1663 
1664 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1665 				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1666 				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1667 				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1668 				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1669 				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1670 				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1671 		memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1672 		       sizeof(qp->qplib_qp.ah.dgid.data));
1673 		qp->qplib_qp.ah.flow_label = grh->flow_label;
1674 		/* If RoCE V2 is enabled, stack will have two entries for
1675 		 * each GID entry. Avoiding this duplicte entry in HW. Dividing
1676 		 * the GID index by 2 for RoCE V2
1677 		 */
1678 		qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1679 		qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1680 		qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1681 		qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1682 		qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1683 		ether_addr_copy(qp->qplib_qp.ah.dmac,
1684 				qp_attr->ah_attr.roce.dmac);
1685 
1686 		sgid_attr = qp_attr->ah_attr.grh.sgid_attr;
1687 		memcpy(qp->qplib_qp.smac, sgid_attr->ndev->dev_addr,
1688 		       ETH_ALEN);
1689 		nw_type = rdma_gid_attr_network_type(sgid_attr);
1690 		switch (nw_type) {
1691 		case RDMA_NETWORK_IPV4:
1692 			qp->qplib_qp.nw_type =
1693 				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1694 			break;
1695 		case RDMA_NETWORK_IPV6:
1696 			qp->qplib_qp.nw_type =
1697 				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1698 			break;
1699 		default:
1700 			qp->qplib_qp.nw_type =
1701 				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1702 			break;
1703 		}
1704 	}
1705 
1706 	if (qp_attr_mask & IB_QP_PATH_MTU) {
1707 		qp->qplib_qp.modify_flags |=
1708 				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1709 		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1710 		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1711 	} else if (qp_attr->qp_state == IB_QPS_RTR) {
1712 		qp->qplib_qp.modify_flags |=
1713 			CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1714 		qp->qplib_qp.path_mtu =
1715 			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1716 		qp->qplib_qp.mtu =
1717 			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1718 	}
1719 
1720 	if (qp_attr_mask & IB_QP_TIMEOUT) {
1721 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1722 		qp->qplib_qp.timeout = qp_attr->timeout;
1723 	}
1724 	if (qp_attr_mask & IB_QP_RETRY_CNT) {
1725 		qp->qplib_qp.modify_flags |=
1726 				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1727 		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1728 	}
1729 	if (qp_attr_mask & IB_QP_RNR_RETRY) {
1730 		qp->qplib_qp.modify_flags |=
1731 				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1732 		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1733 	}
1734 	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1735 		qp->qplib_qp.modify_flags |=
1736 				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1737 		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1738 	}
1739 	if (qp_attr_mask & IB_QP_RQ_PSN) {
1740 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1741 		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1742 	}
1743 	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1744 		qp->qplib_qp.modify_flags |=
1745 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1746 		/* Cap the max_rd_atomic to device max */
1747 		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1748 						   dev_attr->max_qp_rd_atom);
1749 	}
1750 	if (qp_attr_mask & IB_QP_SQ_PSN) {
1751 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1752 		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1753 	}
1754 	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1755 		if (qp_attr->max_dest_rd_atomic >
1756 		    dev_attr->max_qp_init_rd_atom) {
1757 			dev_err(rdev_to_dev(rdev),
1758 				"max_dest_rd_atomic requested%d is > dev_max%d",
1759 				qp_attr->max_dest_rd_atomic,
1760 				dev_attr->max_qp_init_rd_atom);
1761 			return -EINVAL;
1762 		}
1763 
1764 		qp->qplib_qp.modify_flags |=
1765 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1766 		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1767 	}
1768 	if (qp_attr_mask & IB_QP_CAP) {
1769 		qp->qplib_qp.modify_flags |=
1770 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1771 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1772 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1773 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1774 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1775 		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1776 		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1777 		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1778 		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1779 		    (qp_attr->cap.max_inline_data >=
1780 						dev_attr->max_inline_data)) {
1781 			dev_err(rdev_to_dev(rdev),
1782 				"Create QP failed - max exceeded");
1783 			return -EINVAL;
1784 		}
1785 		entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1786 		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1787 						dev_attr->max_qp_wqes + 1);
1788 		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1789 						qp_attr->cap.max_send_wr;
1790 		/*
1791 		 * Reserving one slot for Phantom WQE. Some application can
1792 		 * post one extra entry in this case. Allowing this to avoid
1793 		 * unexpected Queue full condition
1794 		 */
1795 		qp->qplib_qp.sq.q_full_delta -= 1;
1796 		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1797 		if (qp->qplib_qp.rq.max_wqe) {
1798 			entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1799 			qp->qplib_qp.rq.max_wqe =
1800 				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1801 			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1802 						       qp_attr->cap.max_recv_wr;
1803 			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1804 		} else {
1805 			/* SRQ was used prior, just ignore the RQ caps */
1806 		}
1807 	}
1808 	if (qp_attr_mask & IB_QP_DEST_QPN) {
1809 		qp->qplib_qp.modify_flags |=
1810 				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1811 		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1812 	}
1813 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1814 	if (rc) {
1815 		dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1816 		return rc;
1817 	}
1818 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1819 		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1820 	return rc;
1821 }
1822 
1823 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1824 		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1825 {
1826 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1827 	struct bnxt_re_dev *rdev = qp->rdev;
1828 	struct bnxt_qplib_qp *qplib_qp;
1829 	int rc;
1830 
1831 	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
1832 	if (!qplib_qp)
1833 		return -ENOMEM;
1834 
1835 	qplib_qp->id = qp->qplib_qp.id;
1836 	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1837 
1838 	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
1839 	if (rc) {
1840 		dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1841 		goto out;
1842 	}
1843 	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
1844 	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
1845 	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
1846 	qp_attr->pkey_index = qplib_qp->pkey_index;
1847 	qp_attr->qkey = qplib_qp->qkey;
1848 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1849 	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
1850 			qplib_qp->ah.host_sgid_index,
1851 			qplib_qp->ah.hop_limit,
1852 			qplib_qp->ah.traffic_class);
1853 	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
1854 	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
1855 	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
1856 	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
1857 	qp_attr->timeout = qplib_qp->timeout;
1858 	qp_attr->retry_cnt = qplib_qp->retry_cnt;
1859 	qp_attr->rnr_retry = qplib_qp->rnr_retry;
1860 	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
1861 	qp_attr->rq_psn = qplib_qp->rq.psn;
1862 	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
1863 	qp_attr->sq_psn = qplib_qp->sq.psn;
1864 	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
1865 	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
1866 							 IB_SIGNAL_REQ_WR;
1867 	qp_attr->dest_qp_num = qplib_qp->dest_qpn;
1868 
1869 	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1870 	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1871 	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1872 	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1873 	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1874 	qp_init_attr->cap = qp_attr->cap;
1875 
1876 out:
1877 	kfree(qplib_qp);
1878 	return rc;
1879 }
1880 
1881 /* Routine for sending QP1 packets for RoCE V1 an V2
1882  */
1883 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1884 				     const struct ib_send_wr *wr,
1885 				     struct bnxt_qplib_swqe *wqe,
1886 				     int payload_size)
1887 {
1888 	struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1889 					     ib_ah);
1890 	struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1891 	const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
1892 	struct bnxt_qplib_sge sge;
1893 	u8 nw_type;
1894 	u16 ether_type;
1895 	union ib_gid dgid;
1896 	bool is_eth = false;
1897 	bool is_vlan = false;
1898 	bool is_grh = false;
1899 	bool is_udp = false;
1900 	u8 ip_version = 0;
1901 	u16 vlan_id = 0xFFFF;
1902 	void *buf;
1903 	int i, rc = 0;
1904 
1905 	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1906 
1907 	if (is_vlan_dev(sgid_attr->ndev))
1908 		vlan_id = vlan_dev_vlan_id(sgid_attr->ndev);
1909 	/* Get network header type for this GID */
1910 	nw_type = rdma_gid_attr_network_type(sgid_attr);
1911 	switch (nw_type) {
1912 	case RDMA_NETWORK_IPV4:
1913 		nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1914 		break;
1915 	case RDMA_NETWORK_IPV6:
1916 		nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1917 		break;
1918 	default:
1919 		nw_type = BNXT_RE_ROCE_V1_PACKET;
1920 		break;
1921 	}
1922 	memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1923 	is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1924 	if (is_udp) {
1925 		if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
1926 			ip_version = 4;
1927 			ether_type = ETH_P_IP;
1928 		} else {
1929 			ip_version = 6;
1930 			ether_type = ETH_P_IPV6;
1931 		}
1932 		is_grh = false;
1933 	} else {
1934 		ether_type = ETH_P_IBOE;
1935 		is_grh = true;
1936 	}
1937 
1938 	is_eth = true;
1939 	is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
1940 
1941 	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1942 			  ip_version, is_udp, 0, &qp->qp1_hdr);
1943 
1944 	/* ETH */
1945 	ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1946 	ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1947 
1948 	/* For vlan, check the sgid for vlan existence */
1949 
1950 	if (!is_vlan) {
1951 		qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1952 	} else {
1953 		qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1954 		qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1955 	}
1956 
1957 	if (is_grh || (ip_version == 6)) {
1958 		memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
1959 		       sizeof(sgid_attr->gid));
1960 		memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1961 		       sizeof(sgid_attr->gid));
1962 		qp->qp1_hdr.grh.hop_limit     = qplib_ah->hop_limit;
1963 	}
1964 
1965 	if (ip_version == 4) {
1966 		qp->qp1_hdr.ip4.tos = 0;
1967 		qp->qp1_hdr.ip4.id = 0;
1968 		qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1969 		qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1970 
1971 		memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
1972 		memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1973 		qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1974 	}
1975 
1976 	if (is_udp) {
1977 		qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1978 		qp->qp1_hdr.udp.sport = htons(0x8CD1);
1979 		qp->qp1_hdr.udp.csum = 0;
1980 	}
1981 
1982 	/* BTH */
1983 	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1984 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1985 		qp->qp1_hdr.immediate_present = 1;
1986 	} else {
1987 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1988 	}
1989 	if (wr->send_flags & IB_SEND_SOLICITED)
1990 		qp->qp1_hdr.bth.solicited_event = 1;
1991 	/* pad_count */
1992 	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1993 
1994 	/* P_key for QP1 is for all members */
1995 	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1996 	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1997 	qp->qp1_hdr.bth.ack_req = 0;
1998 	qp->send_psn++;
1999 	qp->send_psn &= BTH_PSN_MASK;
2000 	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2001 	/* DETH */
2002 	/* Use the priviledged Q_Key for QP1 */
2003 	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2004 	qp->qp1_hdr.deth.source_qpn = IB_QP1;
2005 
2006 	/* Pack the QP1 to the transmit buffer */
2007 	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2008 	if (buf) {
2009 		ib_ud_header_pack(&qp->qp1_hdr, buf);
2010 		for (i = wqe->num_sge; i; i--) {
2011 			wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2012 			wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2013 			wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2014 		}
2015 
2016 		/*
2017 		 * Max Header buf size for IPV6 RoCE V2 is 86,
2018 		 * which is same as the QP1 SQ header buffer.
2019 		 * Header buf size for IPV4 RoCE V2 can be 66.
2020 		 * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
2021 		 * Subtract 20 bytes from QP1 SQ header buf size
2022 		 */
2023 		if (is_udp && ip_version == 4)
2024 			sge.size -= 20;
2025 		/*
2026 		 * Max Header buf size for RoCE V1 is 78.
2027 		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2028 		 * Subtract 8 bytes from QP1 SQ header buf size
2029 		 */
2030 		if (!is_udp)
2031 			sge.size -= 8;
2032 
2033 		/* Subtract 4 bytes for non vlan packets */
2034 		if (!is_vlan)
2035 			sge.size -= 4;
2036 
2037 		wqe->sg_list[0].addr = sge.addr;
2038 		wqe->sg_list[0].lkey = sge.lkey;
2039 		wqe->sg_list[0].size = sge.size;
2040 		wqe->num_sge++;
2041 
2042 	} else {
2043 		dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
2044 		rc = -ENOMEM;
2045 	}
2046 	return rc;
2047 }
2048 
2049 /* For the MAD layer, it only provides the recv SGE the size of
2050  * ib_grh + MAD datagram.  No Ethernet headers, Ethertype, BTH, DETH,
2051  * nor RoCE iCRC.  The Cu+ solution must provide buffer for the entire
2052  * receive packet (334 bytes) with no VLAN and then copy the GRH
2053  * and the MAD datagram out to the provided SGE.
2054  */
2055 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2056 					    const struct ib_recv_wr *wr,
2057 					    struct bnxt_qplib_swqe *wqe,
2058 					    int payload_size)
2059 {
2060 	struct bnxt_qplib_sge ref, sge;
2061 	u32 rq_prod_index;
2062 	struct bnxt_re_sqp_entries *sqp_entry;
2063 
2064 	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2065 
2066 	if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2067 		return -ENOMEM;
2068 
2069 	/* Create 1 SGE to receive the entire
2070 	 * ethernet packet
2071 	 */
2072 	/* Save the reference from ULP */
2073 	ref.addr = wqe->sg_list[0].addr;
2074 	ref.lkey = wqe->sg_list[0].lkey;
2075 	ref.size = wqe->sg_list[0].size;
2076 
2077 	sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
2078 
2079 	/* SGE 1 */
2080 	wqe->sg_list[0].addr = sge.addr;
2081 	wqe->sg_list[0].lkey = sge.lkey;
2082 	wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2083 	sge.size -= wqe->sg_list[0].size;
2084 
2085 	sqp_entry->sge.addr = ref.addr;
2086 	sqp_entry->sge.lkey = ref.lkey;
2087 	sqp_entry->sge.size = ref.size;
2088 	/* Store the wrid for reporting completion */
2089 	sqp_entry->wrid = wqe->wr_id;
2090 	/* change the wqe->wrid to table index */
2091 	wqe->wr_id = rq_prod_index;
2092 	return 0;
2093 }
2094 
2095 static int is_ud_qp(struct bnxt_re_qp *qp)
2096 {
2097 	return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2098 		qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
2099 }
2100 
2101 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2102 				  const struct ib_send_wr *wr,
2103 				  struct bnxt_qplib_swqe *wqe)
2104 {
2105 	struct bnxt_re_ah *ah = NULL;
2106 
2107 	if (is_ud_qp(qp)) {
2108 		ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2109 		wqe->send.q_key = ud_wr(wr)->remote_qkey;
2110 		wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2111 		wqe->send.avid = ah->qplib_ah.id;
2112 	}
2113 	switch (wr->opcode) {
2114 	case IB_WR_SEND:
2115 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2116 		break;
2117 	case IB_WR_SEND_WITH_IMM:
2118 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2119 		wqe->send.imm_data = wr->ex.imm_data;
2120 		break;
2121 	case IB_WR_SEND_WITH_INV:
2122 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2123 		wqe->send.inv_key = wr->ex.invalidate_rkey;
2124 		break;
2125 	default:
2126 		return -EINVAL;
2127 	}
2128 	if (wr->send_flags & IB_SEND_SIGNALED)
2129 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2130 	if (wr->send_flags & IB_SEND_FENCE)
2131 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2132 	if (wr->send_flags & IB_SEND_SOLICITED)
2133 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2134 	if (wr->send_flags & IB_SEND_INLINE)
2135 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2136 
2137 	return 0;
2138 }
2139 
2140 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2141 				  struct bnxt_qplib_swqe *wqe)
2142 {
2143 	switch (wr->opcode) {
2144 	case IB_WR_RDMA_WRITE:
2145 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2146 		break;
2147 	case IB_WR_RDMA_WRITE_WITH_IMM:
2148 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2149 		wqe->rdma.imm_data = wr->ex.imm_data;
2150 		break;
2151 	case IB_WR_RDMA_READ:
2152 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2153 		wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2154 		break;
2155 	default:
2156 		return -EINVAL;
2157 	}
2158 	wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2159 	wqe->rdma.r_key = rdma_wr(wr)->rkey;
2160 	if (wr->send_flags & IB_SEND_SIGNALED)
2161 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2162 	if (wr->send_flags & IB_SEND_FENCE)
2163 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2164 	if (wr->send_flags & IB_SEND_SOLICITED)
2165 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2166 	if (wr->send_flags & IB_SEND_INLINE)
2167 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2168 
2169 	return 0;
2170 }
2171 
2172 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2173 				    struct bnxt_qplib_swqe *wqe)
2174 {
2175 	switch (wr->opcode) {
2176 	case IB_WR_ATOMIC_CMP_AND_SWP:
2177 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2178 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2179 		wqe->atomic.swap_data = atomic_wr(wr)->swap;
2180 		break;
2181 	case IB_WR_ATOMIC_FETCH_AND_ADD:
2182 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2183 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2184 		break;
2185 	default:
2186 		return -EINVAL;
2187 	}
2188 	wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2189 	wqe->atomic.r_key = atomic_wr(wr)->rkey;
2190 	if (wr->send_flags & IB_SEND_SIGNALED)
2191 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2192 	if (wr->send_flags & IB_SEND_FENCE)
2193 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2194 	if (wr->send_flags & IB_SEND_SOLICITED)
2195 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2196 	return 0;
2197 }
2198 
2199 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2200 				 struct bnxt_qplib_swqe *wqe)
2201 {
2202 	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2203 	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2204 
2205 	/* Need unconditional fence for local invalidate
2206 	 * opcode to work as expected.
2207 	 */
2208 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2209 
2210 	if (wr->send_flags & IB_SEND_SIGNALED)
2211 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2212 	if (wr->send_flags & IB_SEND_SOLICITED)
2213 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2214 
2215 	return 0;
2216 }
2217 
2218 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2219 				 struct bnxt_qplib_swqe *wqe)
2220 {
2221 	struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2222 	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2223 	int access = wr->access;
2224 
2225 	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2226 	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2227 	wqe->frmr.page_list = mr->pages;
2228 	wqe->frmr.page_list_len = mr->npages;
2229 	wqe->frmr.levels = qplib_frpl->hwq.level + 1;
2230 	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2231 
2232 	/* Need unconditional fence for reg_mr
2233 	 * opcode to function as expected.
2234 	 */
2235 
2236 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2237 
2238 	if (wr->wr.send_flags & IB_SEND_SIGNALED)
2239 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2240 
2241 	if (access & IB_ACCESS_LOCAL_WRITE)
2242 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2243 	if (access & IB_ACCESS_REMOTE_READ)
2244 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2245 	if (access & IB_ACCESS_REMOTE_WRITE)
2246 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2247 	if (access & IB_ACCESS_REMOTE_ATOMIC)
2248 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2249 	if (access & IB_ACCESS_MW_BIND)
2250 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2251 
2252 	wqe->frmr.l_key = wr->key;
2253 	wqe->frmr.length = wr->mr->length;
2254 	wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2255 	wqe->frmr.va = wr->mr->iova;
2256 	return 0;
2257 }
2258 
2259 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2260 				    const struct ib_send_wr *wr,
2261 				    struct bnxt_qplib_swqe *wqe)
2262 {
2263 	/*  Copy the inline data to the data  field */
2264 	u8 *in_data;
2265 	u32 i, sge_len;
2266 	void *sge_addr;
2267 
2268 	in_data = wqe->inline_data;
2269 	for (i = 0; i < wr->num_sge; i++) {
2270 		sge_addr = (void *)(unsigned long)
2271 				wr->sg_list[i].addr;
2272 		sge_len = wr->sg_list[i].length;
2273 
2274 		if ((sge_len + wqe->inline_len) >
2275 		    BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2276 			dev_err(rdev_to_dev(rdev),
2277 				"Inline data size requested > supported value");
2278 			return -EINVAL;
2279 		}
2280 		sge_len = wr->sg_list[i].length;
2281 
2282 		memcpy(in_data, sge_addr, sge_len);
2283 		in_data += wr->sg_list[i].length;
2284 		wqe->inline_len += wr->sg_list[i].length;
2285 	}
2286 	return wqe->inline_len;
2287 }
2288 
2289 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2290 				   const struct ib_send_wr *wr,
2291 				   struct bnxt_qplib_swqe *wqe)
2292 {
2293 	int payload_sz = 0;
2294 
2295 	if (wr->send_flags & IB_SEND_INLINE)
2296 		payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2297 	else
2298 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2299 					       wqe->num_sge);
2300 
2301 	return payload_sz;
2302 }
2303 
2304 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2305 {
2306 	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2307 	     qp->ib_qp.qp_type == IB_QPT_GSI ||
2308 	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2309 	     qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2310 		int qp_attr_mask;
2311 		struct ib_qp_attr qp_attr;
2312 
2313 		qp_attr_mask = IB_QP_STATE;
2314 		qp_attr.qp_state = IB_QPS_RTS;
2315 		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2316 		qp->qplib_qp.wqe_cnt = 0;
2317 	}
2318 }
2319 
2320 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2321 				       struct bnxt_re_qp *qp,
2322 				       const struct ib_send_wr *wr)
2323 {
2324 	struct bnxt_qplib_swqe wqe;
2325 	int rc = 0, payload_sz = 0;
2326 	unsigned long flags;
2327 
2328 	spin_lock_irqsave(&qp->sq_lock, flags);
2329 	memset(&wqe, 0, sizeof(wqe));
2330 	while (wr) {
2331 		/* House keeping */
2332 		memset(&wqe, 0, sizeof(wqe));
2333 
2334 		/* Common */
2335 		wqe.num_sge = wr->num_sge;
2336 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2337 			dev_err(rdev_to_dev(rdev),
2338 				"Limit exceeded for Send SGEs");
2339 			rc = -EINVAL;
2340 			goto bad;
2341 		}
2342 
2343 		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2344 		if (payload_sz < 0) {
2345 			rc = -EINVAL;
2346 			goto bad;
2347 		}
2348 		wqe.wr_id = wr->wr_id;
2349 
2350 		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2351 
2352 		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2353 		if (!rc)
2354 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2355 bad:
2356 		if (rc) {
2357 			dev_err(rdev_to_dev(rdev),
2358 				"Post send failed opcode = %#x rc = %d",
2359 				wr->opcode, rc);
2360 			break;
2361 		}
2362 		wr = wr->next;
2363 	}
2364 	bnxt_qplib_post_send_db(&qp->qplib_qp);
2365 	bnxt_ud_qp_hw_stall_workaround(qp);
2366 	spin_unlock_irqrestore(&qp->sq_lock, flags);
2367 	return rc;
2368 }
2369 
2370 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2371 		      const struct ib_send_wr **bad_wr)
2372 {
2373 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2374 	struct bnxt_qplib_swqe wqe;
2375 	int rc = 0, payload_sz = 0;
2376 	unsigned long flags;
2377 
2378 	spin_lock_irqsave(&qp->sq_lock, flags);
2379 	while (wr) {
2380 		/* House keeping */
2381 		memset(&wqe, 0, sizeof(wqe));
2382 
2383 		/* Common */
2384 		wqe.num_sge = wr->num_sge;
2385 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2386 			dev_err(rdev_to_dev(qp->rdev),
2387 				"Limit exceeded for Send SGEs");
2388 			rc = -EINVAL;
2389 			goto bad;
2390 		}
2391 
2392 		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2393 		if (payload_sz < 0) {
2394 			rc = -EINVAL;
2395 			goto bad;
2396 		}
2397 		wqe.wr_id = wr->wr_id;
2398 
2399 		switch (wr->opcode) {
2400 		case IB_WR_SEND:
2401 		case IB_WR_SEND_WITH_IMM:
2402 			if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2403 				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2404 							       payload_sz);
2405 				if (rc)
2406 					goto bad;
2407 				wqe.rawqp1.lflags |=
2408 					SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2409 			}
2410 			switch (wr->send_flags) {
2411 			case IB_SEND_IP_CSUM:
2412 				wqe.rawqp1.lflags |=
2413 					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2414 				break;
2415 			default:
2416 				break;
2417 			}
2418 			/* fall through */
2419 		case IB_WR_SEND_WITH_INV:
2420 			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2421 			break;
2422 		case IB_WR_RDMA_WRITE:
2423 		case IB_WR_RDMA_WRITE_WITH_IMM:
2424 		case IB_WR_RDMA_READ:
2425 			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2426 			break;
2427 		case IB_WR_ATOMIC_CMP_AND_SWP:
2428 		case IB_WR_ATOMIC_FETCH_AND_ADD:
2429 			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2430 			break;
2431 		case IB_WR_RDMA_READ_WITH_INV:
2432 			dev_err(rdev_to_dev(qp->rdev),
2433 				"RDMA Read with Invalidate is not supported");
2434 			rc = -EINVAL;
2435 			goto bad;
2436 		case IB_WR_LOCAL_INV:
2437 			rc = bnxt_re_build_inv_wqe(wr, &wqe);
2438 			break;
2439 		case IB_WR_REG_MR:
2440 			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2441 			break;
2442 		default:
2443 			/* Unsupported WRs */
2444 			dev_err(rdev_to_dev(qp->rdev),
2445 				"WR (%#x) is not supported", wr->opcode);
2446 			rc = -EINVAL;
2447 			goto bad;
2448 		}
2449 		if (!rc)
2450 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2451 bad:
2452 		if (rc) {
2453 			dev_err(rdev_to_dev(qp->rdev),
2454 				"post_send failed op:%#x qps = %#x rc = %d\n",
2455 				wr->opcode, qp->qplib_qp.state, rc);
2456 			*bad_wr = wr;
2457 			break;
2458 		}
2459 		wr = wr->next;
2460 	}
2461 	bnxt_qplib_post_send_db(&qp->qplib_qp);
2462 	bnxt_ud_qp_hw_stall_workaround(qp);
2463 	spin_unlock_irqrestore(&qp->sq_lock, flags);
2464 
2465 	return rc;
2466 }
2467 
2468 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2469 				       struct bnxt_re_qp *qp,
2470 				       const struct ib_recv_wr *wr)
2471 {
2472 	struct bnxt_qplib_swqe wqe;
2473 	int rc = 0;
2474 
2475 	memset(&wqe, 0, sizeof(wqe));
2476 	while (wr) {
2477 		/* House keeping */
2478 		memset(&wqe, 0, sizeof(wqe));
2479 
2480 		/* Common */
2481 		wqe.num_sge = wr->num_sge;
2482 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2483 			dev_err(rdev_to_dev(rdev),
2484 				"Limit exceeded for Receive SGEs");
2485 			rc = -EINVAL;
2486 			break;
2487 		}
2488 		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2489 		wqe.wr_id = wr->wr_id;
2490 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2491 
2492 		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2493 		if (rc)
2494 			break;
2495 
2496 		wr = wr->next;
2497 	}
2498 	if (!rc)
2499 		bnxt_qplib_post_recv_db(&qp->qplib_qp);
2500 	return rc;
2501 }
2502 
2503 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2504 		      const struct ib_recv_wr **bad_wr)
2505 {
2506 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2507 	struct bnxt_qplib_swqe wqe;
2508 	int rc = 0, payload_sz = 0;
2509 	unsigned long flags;
2510 	u32 count = 0;
2511 
2512 	spin_lock_irqsave(&qp->rq_lock, flags);
2513 	while (wr) {
2514 		/* House keeping */
2515 		memset(&wqe, 0, sizeof(wqe));
2516 
2517 		/* Common */
2518 		wqe.num_sge = wr->num_sge;
2519 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2520 			dev_err(rdev_to_dev(qp->rdev),
2521 				"Limit exceeded for Receive SGEs");
2522 			rc = -EINVAL;
2523 			*bad_wr = wr;
2524 			break;
2525 		}
2526 
2527 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2528 					       wr->num_sge);
2529 		wqe.wr_id = wr->wr_id;
2530 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2531 
2532 		if (ib_qp->qp_type == IB_QPT_GSI &&
2533 		    qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
2534 			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2535 							      payload_sz);
2536 		if (!rc)
2537 			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2538 		if (rc) {
2539 			*bad_wr = wr;
2540 			break;
2541 		}
2542 
2543 		/* Ring DB if the RQEs posted reaches a threshold value */
2544 		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2545 			bnxt_qplib_post_recv_db(&qp->qplib_qp);
2546 			count = 0;
2547 		}
2548 
2549 		wr = wr->next;
2550 	}
2551 
2552 	if (count)
2553 		bnxt_qplib_post_recv_db(&qp->qplib_qp);
2554 
2555 	spin_unlock_irqrestore(&qp->rq_lock, flags);
2556 
2557 	return rc;
2558 }
2559 
2560 /* Completion Queues */
2561 int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2562 {
2563 	int rc;
2564 	struct bnxt_re_cq *cq;
2565 	struct bnxt_qplib_nq *nq;
2566 	struct bnxt_re_dev *rdev;
2567 
2568 	cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2569 	rdev = cq->rdev;
2570 	nq = cq->qplib_cq.nq;
2571 
2572 	rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2573 	if (rc) {
2574 		dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2575 		return rc;
2576 	}
2577 	if (!IS_ERR_OR_NULL(cq->umem))
2578 		ib_umem_release(cq->umem);
2579 
2580 	atomic_dec(&rdev->cq_count);
2581 	nq->budget--;
2582 	kfree(cq->cql);
2583 	kfree(cq);
2584 
2585 	return 0;
2586 }
2587 
2588 struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2589 				const struct ib_cq_init_attr *attr,
2590 				struct ib_ucontext *context,
2591 				struct ib_udata *udata)
2592 {
2593 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2594 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2595 	struct bnxt_re_cq *cq = NULL;
2596 	int rc, entries;
2597 	int cqe = attr->cqe;
2598 	struct bnxt_qplib_nq *nq = NULL;
2599 	unsigned int nq_alloc_cnt;
2600 
2601 	/* Validate CQ fields */
2602 	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2603 		dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
2604 		return ERR_PTR(-EINVAL);
2605 	}
2606 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2607 	if (!cq)
2608 		return ERR_PTR(-ENOMEM);
2609 
2610 	cq->rdev = rdev;
2611 	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2612 
2613 	entries = roundup_pow_of_two(cqe + 1);
2614 	if (entries > dev_attr->max_cq_wqes + 1)
2615 		entries = dev_attr->max_cq_wqes + 1;
2616 
2617 	if (context) {
2618 		struct bnxt_re_cq_req req;
2619 		struct bnxt_re_ucontext *uctx = container_of
2620 						(context,
2621 						 struct bnxt_re_ucontext,
2622 						 ib_uctx);
2623 		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2624 			rc = -EFAULT;
2625 			goto fail;
2626 		}
2627 
2628 		cq->umem = ib_umem_get(udata, req.cq_va,
2629 				       entries * sizeof(struct cq_base),
2630 				       IB_ACCESS_LOCAL_WRITE, 1);
2631 		if (IS_ERR(cq->umem)) {
2632 			rc = PTR_ERR(cq->umem);
2633 			goto fail;
2634 		}
2635 		cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2636 		cq->qplib_cq.nmap = cq->umem->nmap;
2637 		cq->qplib_cq.dpi = &uctx->dpi;
2638 	} else {
2639 		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2640 		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2641 				  GFP_KERNEL);
2642 		if (!cq->cql) {
2643 			rc = -ENOMEM;
2644 			goto fail;
2645 		}
2646 
2647 		cq->qplib_cq.dpi = &rdev->dpi_privileged;
2648 		cq->qplib_cq.sghead = NULL;
2649 		cq->qplib_cq.nmap = 0;
2650 	}
2651 	/*
2652 	 * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a
2653 	 * used for getting the NQ index.
2654 	 */
2655 	nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2656 	nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2657 	cq->qplib_cq.max_wqe = entries;
2658 	cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2659 	cq->qplib_cq.nq	= nq;
2660 
2661 	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2662 	if (rc) {
2663 		dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2664 		goto fail;
2665 	}
2666 
2667 	cq->ib_cq.cqe = entries;
2668 	cq->cq_period = cq->qplib_cq.period;
2669 	nq->budget++;
2670 
2671 	atomic_inc(&rdev->cq_count);
2672 	spin_lock_init(&cq->cq_lock);
2673 
2674 	if (context) {
2675 		struct bnxt_re_cq_resp resp;
2676 
2677 		resp.cqid = cq->qplib_cq.id;
2678 		resp.tail = cq->qplib_cq.hwq.cons;
2679 		resp.phase = cq->qplib_cq.period;
2680 		resp.rsvd = 0;
2681 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2682 		if (rc) {
2683 			dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2684 			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2685 			goto c2fail;
2686 		}
2687 	}
2688 
2689 	return &cq->ib_cq;
2690 
2691 c2fail:
2692 	if (context)
2693 		ib_umem_release(cq->umem);
2694 fail:
2695 	kfree(cq->cql);
2696 	kfree(cq);
2697 	return ERR_PTR(rc);
2698 }
2699 
2700 static u8 __req_to_ib_wc_status(u8 qstatus)
2701 {
2702 	switch (qstatus) {
2703 	case CQ_REQ_STATUS_OK:
2704 		return IB_WC_SUCCESS;
2705 	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2706 		return IB_WC_BAD_RESP_ERR;
2707 	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2708 		return IB_WC_LOC_LEN_ERR;
2709 	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2710 		return IB_WC_LOC_QP_OP_ERR;
2711 	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2712 		return IB_WC_LOC_PROT_ERR;
2713 	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2714 		return IB_WC_GENERAL_ERR;
2715 	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2716 		return IB_WC_REM_INV_REQ_ERR;
2717 	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2718 		return IB_WC_REM_ACCESS_ERR;
2719 	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2720 		return IB_WC_REM_OP_ERR;
2721 	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2722 		return IB_WC_RNR_RETRY_EXC_ERR;
2723 	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2724 		return IB_WC_RETRY_EXC_ERR;
2725 	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2726 		return IB_WC_WR_FLUSH_ERR;
2727 	default:
2728 		return IB_WC_GENERAL_ERR;
2729 	}
2730 	return 0;
2731 }
2732 
2733 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2734 {
2735 	switch (qstatus) {
2736 	case CQ_RES_RAWETH_QP1_STATUS_OK:
2737 		return IB_WC_SUCCESS;
2738 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2739 		return IB_WC_LOC_ACCESS_ERR;
2740 	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2741 		return IB_WC_LOC_LEN_ERR;
2742 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2743 		return IB_WC_LOC_PROT_ERR;
2744 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2745 		return IB_WC_LOC_QP_OP_ERR;
2746 	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2747 		return IB_WC_GENERAL_ERR;
2748 	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2749 		return IB_WC_WR_FLUSH_ERR;
2750 	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2751 		return IB_WC_WR_FLUSH_ERR;
2752 	default:
2753 		return IB_WC_GENERAL_ERR;
2754 	}
2755 }
2756 
2757 static u8 __rc_to_ib_wc_status(u8 qstatus)
2758 {
2759 	switch (qstatus) {
2760 	case CQ_RES_RC_STATUS_OK:
2761 		return IB_WC_SUCCESS;
2762 	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2763 		return IB_WC_LOC_ACCESS_ERR;
2764 	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2765 		return IB_WC_LOC_LEN_ERR;
2766 	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2767 		return IB_WC_LOC_PROT_ERR;
2768 	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2769 		return IB_WC_LOC_QP_OP_ERR;
2770 	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2771 		return IB_WC_GENERAL_ERR;
2772 	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2773 		return IB_WC_REM_INV_REQ_ERR;
2774 	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2775 		return IB_WC_WR_FLUSH_ERR;
2776 	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2777 		return IB_WC_WR_FLUSH_ERR;
2778 	default:
2779 		return IB_WC_GENERAL_ERR;
2780 	}
2781 }
2782 
2783 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2784 {
2785 	switch (cqe->type) {
2786 	case BNXT_QPLIB_SWQE_TYPE_SEND:
2787 		wc->opcode = IB_WC_SEND;
2788 		break;
2789 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2790 		wc->opcode = IB_WC_SEND;
2791 		wc->wc_flags |= IB_WC_WITH_IMM;
2792 		break;
2793 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2794 		wc->opcode = IB_WC_SEND;
2795 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2796 		break;
2797 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2798 		wc->opcode = IB_WC_RDMA_WRITE;
2799 		break;
2800 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2801 		wc->opcode = IB_WC_RDMA_WRITE;
2802 		wc->wc_flags |= IB_WC_WITH_IMM;
2803 		break;
2804 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2805 		wc->opcode = IB_WC_RDMA_READ;
2806 		break;
2807 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2808 		wc->opcode = IB_WC_COMP_SWAP;
2809 		break;
2810 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2811 		wc->opcode = IB_WC_FETCH_ADD;
2812 		break;
2813 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2814 		wc->opcode = IB_WC_LOCAL_INV;
2815 		break;
2816 	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2817 		wc->opcode = IB_WC_REG_MR;
2818 		break;
2819 	default:
2820 		wc->opcode = IB_WC_SEND;
2821 		break;
2822 	}
2823 
2824 	wc->status = __req_to_ib_wc_status(cqe->status);
2825 }
2826 
2827 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2828 				     u16 raweth_qp1_flags2)
2829 {
2830 	bool is_ipv6 = false, is_ipv4 = false;
2831 
2832 	/* raweth_qp1_flags Bit 9-6 indicates itype */
2833 	if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2834 	    != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2835 		return -1;
2836 
2837 	if (raweth_qp1_flags2 &
2838 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2839 	    raweth_qp1_flags2 &
2840 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2841 		/* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
2842 		(raweth_qp1_flags2 &
2843 		 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
2844 			(is_ipv6 = true) : (is_ipv4 = true);
2845 		return ((is_ipv6) ?
2846 			 BNXT_RE_ROCEV2_IPV6_PACKET :
2847 			 BNXT_RE_ROCEV2_IPV4_PACKET);
2848 	} else {
2849 		return BNXT_RE_ROCE_V1_PACKET;
2850 	}
2851 }
2852 
2853 static int bnxt_re_to_ib_nw_type(int nw_type)
2854 {
2855 	u8 nw_hdr_type = 0xFF;
2856 
2857 	switch (nw_type) {
2858 	case BNXT_RE_ROCE_V1_PACKET:
2859 		nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2860 		break;
2861 	case BNXT_RE_ROCEV2_IPV4_PACKET:
2862 		nw_hdr_type = RDMA_NETWORK_IPV4;
2863 		break;
2864 	case BNXT_RE_ROCEV2_IPV6_PACKET:
2865 		nw_hdr_type = RDMA_NETWORK_IPV6;
2866 		break;
2867 	}
2868 	return nw_hdr_type;
2869 }
2870 
2871 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2872 				       void *rq_hdr_buf)
2873 {
2874 	u8 *tmp_buf = NULL;
2875 	struct ethhdr *eth_hdr;
2876 	u16 eth_type;
2877 	bool rc = false;
2878 
2879 	tmp_buf = (u8 *)rq_hdr_buf;
2880 	/*
2881 	 * If dest mac is not same as I/F mac, this could be a
2882 	 * loopback address or multicast address, check whether
2883 	 * it is a loopback packet
2884 	 */
2885 	if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2886 		tmp_buf += 4;
2887 		/* Check the  ether type */
2888 		eth_hdr = (struct ethhdr *)tmp_buf;
2889 		eth_type = ntohs(eth_hdr->h_proto);
2890 		switch (eth_type) {
2891 		case ETH_P_IBOE:
2892 			rc = true;
2893 			break;
2894 		case ETH_P_IP:
2895 		case ETH_P_IPV6: {
2896 			u32 len;
2897 			struct udphdr *udp_hdr;
2898 
2899 			len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2900 						      sizeof(struct ipv6hdr));
2901 			tmp_buf += sizeof(struct ethhdr) + len;
2902 			udp_hdr = (struct udphdr *)tmp_buf;
2903 			if (ntohs(udp_hdr->dest) ==
2904 				    ROCE_V2_UDP_DPORT)
2905 				rc = true;
2906 			break;
2907 			}
2908 		default:
2909 			break;
2910 		}
2911 	}
2912 
2913 	return rc;
2914 }
2915 
2916 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2917 					 struct bnxt_qplib_cqe *cqe)
2918 {
2919 	struct bnxt_re_dev *rdev = qp1_qp->rdev;
2920 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
2921 	struct bnxt_re_qp *qp = rdev->qp1_sqp;
2922 	struct ib_send_wr *swr;
2923 	struct ib_ud_wr udwr;
2924 	struct ib_recv_wr rwr;
2925 	int pkt_type = 0;
2926 	u32 tbl_idx;
2927 	void *rq_hdr_buf;
2928 	dma_addr_t rq_hdr_buf_map;
2929 	dma_addr_t shrq_hdr_buf_map;
2930 	u32 offset = 0;
2931 	u32 skip_bytes = 0;
2932 	struct ib_sge s_sge[2];
2933 	struct ib_sge r_sge[2];
2934 	int rc;
2935 
2936 	memset(&udwr, 0, sizeof(udwr));
2937 	memset(&rwr, 0, sizeof(rwr));
2938 	memset(&s_sge, 0, sizeof(s_sge));
2939 	memset(&r_sge, 0, sizeof(r_sge));
2940 
2941 	swr = &udwr.wr;
2942 	tbl_idx = cqe->wr_id;
2943 
2944 	rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2945 			(tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2946 	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2947 							  tbl_idx);
2948 
2949 	/* Shadow QP header buffer */
2950 	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2951 							    tbl_idx);
2952 	sqp_entry = &rdev->sqp_tbl[tbl_idx];
2953 
2954 	/* Store this cqe */
2955 	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2956 	sqp_entry->qp1_qp = qp1_qp;
2957 
2958 	/* Find packet type from the cqe */
2959 
2960 	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2961 					     cqe->raweth_qp1_flags2);
2962 	if (pkt_type < 0) {
2963 		dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2964 		return -EINVAL;
2965 	}
2966 
2967 	/* Adjust the offset for the user buffer and post in the rq */
2968 
2969 	if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2970 		offset = 20;
2971 
2972 	/*
2973 	 * QP1 loopback packet has 4 bytes of internal header before
2974 	 * ether header. Skip these four bytes.
2975 	 */
2976 	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2977 		skip_bytes = 4;
2978 
2979 	/* First send SGE . Skip the ether header*/
2980 	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2981 			+ skip_bytes;
2982 	s_sge[0].lkey = 0xFFFFFFFF;
2983 	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2984 				BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2985 
2986 	/* Second Send SGE */
2987 	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2988 			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2989 	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2990 		s_sge[1].addr += 8;
2991 	s_sge[1].lkey = 0xFFFFFFFF;
2992 	s_sge[1].length = 256;
2993 
2994 	/* First recv SGE */
2995 
2996 	r_sge[0].addr = shrq_hdr_buf_map;
2997 	r_sge[0].lkey = 0xFFFFFFFF;
2998 	r_sge[0].length = 40;
2999 
3000 	r_sge[1].addr = sqp_entry->sge.addr + offset;
3001 	r_sge[1].lkey = sqp_entry->sge.lkey;
3002 	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3003 
3004 	/* Create receive work request */
3005 	rwr.num_sge = 2;
3006 	rwr.sg_list = r_sge;
3007 	rwr.wr_id = tbl_idx;
3008 	rwr.next = NULL;
3009 
3010 	rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
3011 	if (rc) {
3012 		dev_err(rdev_to_dev(rdev),
3013 			"Failed to post Rx buffers to shadow QP");
3014 		return -ENOMEM;
3015 	}
3016 
3017 	swr->num_sge = 2;
3018 	swr->sg_list = s_sge;
3019 	swr->wr_id = tbl_idx;
3020 	swr->opcode = IB_WR_SEND;
3021 	swr->next = NULL;
3022 
3023 	udwr.ah = &rdev->sqp_ah->ib_ah;
3024 	udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
3025 	udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
3026 
3027 	/* post data received  in the send queue */
3028 	rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
3029 
3030 	return 0;
3031 }
3032 
3033 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3034 					  struct bnxt_qplib_cqe *cqe)
3035 {
3036 	wc->opcode = IB_WC_RECV;
3037 	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3038 	wc->wc_flags |= IB_WC_GRH;
3039 }
3040 
3041 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3042 				u16 *vid, u8 *sl)
3043 {
3044 	bool ret = false;
3045 	u32 metadata;
3046 	u16 tpid;
3047 
3048 	metadata = orig_cqe->raweth_qp1_metadata;
3049 	if (orig_cqe->raweth_qp1_flags2 &
3050 		CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3051 		tpid = ((metadata &
3052 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3053 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3054 		if (tpid == ETH_P_8021Q) {
3055 			*vid = metadata &
3056 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3057 			*sl = (metadata &
3058 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3059 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3060 			ret = true;
3061 		}
3062 	}
3063 
3064 	return ret;
3065 }
3066 
3067 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3068 				      struct bnxt_qplib_cqe *cqe)
3069 {
3070 	wc->opcode = IB_WC_RECV;
3071 	wc->status = __rc_to_ib_wc_status(cqe->status);
3072 
3073 	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3074 		wc->wc_flags |= IB_WC_WITH_IMM;
3075 	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3076 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3077 	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3078 	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3079 		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3080 }
3081 
3082 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
3083 					     struct ib_wc *wc,
3084 					     struct bnxt_qplib_cqe *cqe)
3085 {
3086 	struct bnxt_re_dev *rdev = qp->rdev;
3087 	struct bnxt_re_qp *qp1_qp = NULL;
3088 	struct bnxt_qplib_cqe *orig_cqe = NULL;
3089 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3090 	int nw_type;
3091 	u32 tbl_idx;
3092 	u16 vlan_id;
3093 	u8 sl;
3094 
3095 	tbl_idx = cqe->wr_id;
3096 
3097 	sqp_entry = &rdev->sqp_tbl[tbl_idx];
3098 	qp1_qp = sqp_entry->qp1_qp;
3099 	orig_cqe = &sqp_entry->cqe;
3100 
3101 	wc->wr_id = sqp_entry->wrid;
3102 	wc->byte_len = orig_cqe->length;
3103 	wc->qp = &qp1_qp->ib_qp;
3104 
3105 	wc->ex.imm_data = orig_cqe->immdata;
3106 	wc->src_qp = orig_cqe->src_qp;
3107 	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3108 	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3109 		wc->vlan_id = vlan_id;
3110 		wc->sl = sl;
3111 		wc->wc_flags |= IB_WC_WITH_VLAN;
3112 	}
3113 	wc->port_num = 1;
3114 	wc->vendor_err = orig_cqe->status;
3115 
3116 	wc->opcode = IB_WC_RECV;
3117 	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3118 	wc->wc_flags |= IB_WC_GRH;
3119 
3120 	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3121 					    orig_cqe->raweth_qp1_flags2);
3122 	if (nw_type >= 0) {
3123 		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3124 		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3125 	}
3126 }
3127 
3128 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3129 				      struct ib_wc *wc,
3130 				      struct bnxt_qplib_cqe *cqe)
3131 {
3132 	u8 nw_type;
3133 
3134 	wc->opcode = IB_WC_RECV;
3135 	wc->status = __rc_to_ib_wc_status(cqe->status);
3136 
3137 	if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3138 		wc->wc_flags |= IB_WC_WITH_IMM;
3139 	/* report only on GSI QP for Thor */
3140 	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3141 		wc->wc_flags |= IB_WC_GRH;
3142 		memcpy(wc->smac, cqe->smac, ETH_ALEN);
3143 		wc->wc_flags |= IB_WC_WITH_SMAC;
3144 		if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3145 			wc->vlan_id = (cqe->cfa_meta & 0xFFF);
3146 			if (wc->vlan_id < 0x1000)
3147 				wc->wc_flags |= IB_WC_WITH_VLAN;
3148 		}
3149 		nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3150 			   CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3151 		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3152 		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3153 	}
3154 
3155 }
3156 
3157 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3158 {
3159 	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3160 	unsigned long flags;
3161 	int rc = 0;
3162 
3163 	spin_lock_irqsave(&qp->sq_lock, flags);
3164 
3165 	rc = bnxt_re_bind_fence_mw(lib_qp);
3166 	if (!rc) {
3167 		lib_qp->sq.phantom_wqe_cnt++;
3168 		dev_dbg(&lib_qp->sq.hwq.pdev->dev,
3169 			"qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3170 			lib_qp->id, lib_qp->sq.hwq.prod,
3171 			HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3172 			lib_qp->sq.phantom_wqe_cnt);
3173 	}
3174 
3175 	spin_unlock_irqrestore(&qp->sq_lock, flags);
3176 	return rc;
3177 }
3178 
3179 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3180 {
3181 	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3182 	struct bnxt_re_qp *qp;
3183 	struct bnxt_qplib_cqe *cqe;
3184 	int i, ncqe, budget;
3185 	struct bnxt_qplib_q *sq;
3186 	struct bnxt_qplib_qp *lib_qp;
3187 	u32 tbl_idx;
3188 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3189 	unsigned long flags;
3190 
3191 	spin_lock_irqsave(&cq->cq_lock, flags);
3192 	budget = min_t(u32, num_entries, cq->max_cql);
3193 	num_entries = budget;
3194 	if (!cq->cql) {
3195 		dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
3196 		goto exit;
3197 	}
3198 	cqe = &cq->cql[0];
3199 	while (budget) {
3200 		lib_qp = NULL;
3201 		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3202 		if (lib_qp) {
3203 			sq = &lib_qp->sq;
3204 			if (sq->send_phantom) {
3205 				qp = container_of(lib_qp,
3206 						  struct bnxt_re_qp, qplib_qp);
3207 				if (send_phantom_wqe(qp) == -ENOMEM)
3208 					dev_err(rdev_to_dev(cq->rdev),
3209 						"Phantom failed! Scheduled to send again\n");
3210 				else
3211 					sq->send_phantom = false;
3212 			}
3213 		}
3214 		if (ncqe < budget)
3215 			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3216 							      cqe + ncqe,
3217 							      budget - ncqe);
3218 
3219 		if (!ncqe)
3220 			break;
3221 
3222 		for (i = 0; i < ncqe; i++, cqe++) {
3223 			/* Transcribe each qplib_wqe back to ib_wc */
3224 			memset(wc, 0, sizeof(*wc));
3225 
3226 			wc->wr_id = cqe->wr_id;
3227 			wc->byte_len = cqe->length;
3228 			qp = container_of
3229 				((struct bnxt_qplib_qp *)
3230 				 (unsigned long)(cqe->qp_handle),
3231 				 struct bnxt_re_qp, qplib_qp);
3232 			if (!qp) {
3233 				dev_err(rdev_to_dev(cq->rdev),
3234 					"POLL CQ : bad QP handle");
3235 				continue;
3236 			}
3237 			wc->qp = &qp->ib_qp;
3238 			wc->ex.imm_data = cqe->immdata;
3239 			wc->src_qp = cqe->src_qp;
3240 			memcpy(wc->smac, cqe->smac, ETH_ALEN);
3241 			wc->port_num = 1;
3242 			wc->vendor_err = cqe->status;
3243 
3244 			switch (cqe->opcode) {
3245 			case CQ_BASE_CQE_TYPE_REQ:
3246 				if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
3247 				    qp->rdev->qp1_sqp->qplib_qp.id) {
3248 					/* Handle this completion with
3249 					 * the stored completion
3250 					 */
3251 					memset(wc, 0, sizeof(*wc));
3252 					continue;
3253 				}
3254 				bnxt_re_process_req_wc(wc, cqe);
3255 				break;
3256 			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3257 				if (!cqe->status) {
3258 					int rc = 0;
3259 
3260 					rc = bnxt_re_process_raw_qp_pkt_rx
3261 								(qp, cqe);
3262 					if (!rc) {
3263 						memset(wc, 0, sizeof(*wc));
3264 						continue;
3265 					}
3266 					cqe->status = -1;
3267 				}
3268 				/* Errors need not be looped back.
3269 				 * But change the wr_id to the one
3270 				 * stored in the table
3271 				 */
3272 				tbl_idx = cqe->wr_id;
3273 				sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
3274 				wc->wr_id = sqp_entry->wrid;
3275 				bnxt_re_process_res_rawqp1_wc(wc, cqe);
3276 				break;
3277 			case CQ_BASE_CQE_TYPE_RES_RC:
3278 				bnxt_re_process_res_rc_wc(wc, cqe);
3279 				break;
3280 			case CQ_BASE_CQE_TYPE_RES_UD:
3281 				if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
3282 				    qp->rdev->qp1_sqp->qplib_qp.id) {
3283 					/* Handle this completion with
3284 					 * the stored completion
3285 					 */
3286 					if (cqe->status) {
3287 						continue;
3288 					} else {
3289 						bnxt_re_process_res_shadow_qp_wc
3290 								(qp, wc, cqe);
3291 						break;
3292 					}
3293 				}
3294 				bnxt_re_process_res_ud_wc(qp, wc, cqe);
3295 				break;
3296 			default:
3297 				dev_err(rdev_to_dev(cq->rdev),
3298 					"POLL CQ : type 0x%x not handled",
3299 					cqe->opcode);
3300 				continue;
3301 			}
3302 			wc++;
3303 			budget--;
3304 		}
3305 	}
3306 exit:
3307 	spin_unlock_irqrestore(&cq->cq_lock, flags);
3308 	return num_entries - budget;
3309 }
3310 
3311 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3312 			  enum ib_cq_notify_flags ib_cqn_flags)
3313 {
3314 	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3315 	int type = 0, rc = 0;
3316 	unsigned long flags;
3317 
3318 	spin_lock_irqsave(&cq->cq_lock, flags);
3319 	/* Trigger on the very next completion */
3320 	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3321 		type = DBC_DBC_TYPE_CQ_ARMALL;
3322 	/* Trigger on the next solicited completion */
3323 	else if (ib_cqn_flags & IB_CQ_SOLICITED)
3324 		type = DBC_DBC_TYPE_CQ_ARMSE;
3325 
3326 	/* Poll to see if there are missed events */
3327 	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3328 	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3329 		rc = 1;
3330 		goto exit;
3331 	}
3332 	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3333 
3334 exit:
3335 	spin_unlock_irqrestore(&cq->cq_lock, flags);
3336 	return rc;
3337 }
3338 
3339 /* Memory Regions */
3340 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3341 {
3342 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3343 	struct bnxt_re_dev *rdev = pd->rdev;
3344 	struct bnxt_re_mr *mr;
3345 	u64 pbl = 0;
3346 	int rc;
3347 
3348 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3349 	if (!mr)
3350 		return ERR_PTR(-ENOMEM);
3351 
3352 	mr->rdev = rdev;
3353 	mr->qplib_mr.pd = &pd->qplib_pd;
3354 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3355 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3356 
3357 	/* Allocate and register 0 as the address */
3358 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3359 	if (rc)
3360 		goto fail;
3361 
3362 	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3363 	mr->qplib_mr.total_size = -1; /* Infinte length */
3364 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
3365 			       PAGE_SIZE);
3366 	if (rc)
3367 		goto fail_mr;
3368 
3369 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
3370 	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3371 			       IB_ACCESS_REMOTE_ATOMIC))
3372 		mr->ib_mr.rkey = mr->ib_mr.lkey;
3373 	atomic_inc(&rdev->mr_count);
3374 
3375 	return &mr->ib_mr;
3376 
3377 fail_mr:
3378 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3379 fail:
3380 	kfree(mr);
3381 	return ERR_PTR(rc);
3382 }
3383 
3384 int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3385 {
3386 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3387 	struct bnxt_re_dev *rdev = mr->rdev;
3388 	int rc;
3389 
3390 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3391 	if (rc)
3392 		dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3393 
3394 	if (mr->pages) {
3395 		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3396 							&mr->qplib_frpl);
3397 		kfree(mr->pages);
3398 		mr->npages = 0;
3399 		mr->pages = NULL;
3400 	}
3401 	if (!IS_ERR_OR_NULL(mr->ib_umem))
3402 		ib_umem_release(mr->ib_umem);
3403 
3404 	kfree(mr);
3405 	atomic_dec(&rdev->mr_count);
3406 	return rc;
3407 }
3408 
3409 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3410 {
3411 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3412 
3413 	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3414 		return -ENOMEM;
3415 
3416 	mr->pages[mr->npages++] = addr;
3417 	return 0;
3418 }
3419 
3420 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3421 		      unsigned int *sg_offset)
3422 {
3423 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3424 
3425 	mr->npages = 0;
3426 	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3427 }
3428 
3429 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3430 			       u32 max_num_sg)
3431 {
3432 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3433 	struct bnxt_re_dev *rdev = pd->rdev;
3434 	struct bnxt_re_mr *mr = NULL;
3435 	int rc;
3436 
3437 	if (type != IB_MR_TYPE_MEM_REG) {
3438 		dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3439 		return ERR_PTR(-EINVAL);
3440 	}
3441 	if (max_num_sg > MAX_PBL_LVL_1_PGS)
3442 		return ERR_PTR(-EINVAL);
3443 
3444 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3445 	if (!mr)
3446 		return ERR_PTR(-ENOMEM);
3447 
3448 	mr->rdev = rdev;
3449 	mr->qplib_mr.pd = &pd->qplib_pd;
3450 	mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3451 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3452 
3453 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3454 	if (rc)
3455 		goto bail;
3456 
3457 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
3458 	mr->ib_mr.rkey = mr->ib_mr.lkey;
3459 
3460 	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3461 	if (!mr->pages) {
3462 		rc = -ENOMEM;
3463 		goto fail;
3464 	}
3465 	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3466 						 &mr->qplib_frpl, max_num_sg);
3467 	if (rc) {
3468 		dev_err(rdev_to_dev(rdev),
3469 			"Failed to allocate HW FR page list");
3470 		goto fail_mr;
3471 	}
3472 
3473 	atomic_inc(&rdev->mr_count);
3474 	return &mr->ib_mr;
3475 
3476 fail_mr:
3477 	kfree(mr->pages);
3478 fail:
3479 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3480 bail:
3481 	kfree(mr);
3482 	return ERR_PTR(rc);
3483 }
3484 
3485 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3486 			       struct ib_udata *udata)
3487 {
3488 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3489 	struct bnxt_re_dev *rdev = pd->rdev;
3490 	struct bnxt_re_mw *mw;
3491 	int rc;
3492 
3493 	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3494 	if (!mw)
3495 		return ERR_PTR(-ENOMEM);
3496 	mw->rdev = rdev;
3497 	mw->qplib_mw.pd = &pd->qplib_pd;
3498 
3499 	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3500 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3501 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3502 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3503 	if (rc) {
3504 		dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3505 		goto fail;
3506 	}
3507 	mw->ib_mw.rkey = mw->qplib_mw.rkey;
3508 
3509 	atomic_inc(&rdev->mw_count);
3510 	return &mw->ib_mw;
3511 
3512 fail:
3513 	kfree(mw);
3514 	return ERR_PTR(rc);
3515 }
3516 
3517 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3518 {
3519 	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3520 	struct bnxt_re_dev *rdev = mw->rdev;
3521 	int rc;
3522 
3523 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3524 	if (rc) {
3525 		dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3526 		return rc;
3527 	}
3528 
3529 	kfree(mw);
3530 	atomic_dec(&rdev->mw_count);
3531 	return rc;
3532 }
3533 
3534 static int bnxt_re_page_size_ok(int page_shift)
3535 {
3536 	switch (page_shift) {
3537 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
3538 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
3539 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
3540 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
3541 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
3542 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
3543 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
3544 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
3545 		return 1;
3546 	default:
3547 		return 0;
3548 	}
3549 }
3550 
3551 static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
3552 			     int page_shift)
3553 {
3554 	u64 *pbl_tbl = pbl_tbl_orig;
3555 	u64 paddr;
3556 	u64 page_mask = (1ULL << page_shift) - 1;
3557 	struct sg_dma_page_iter sg_iter;
3558 
3559 	for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
3560 		paddr = sg_page_iter_dma_address(&sg_iter);
3561 		if (pbl_tbl == pbl_tbl_orig)
3562 			*pbl_tbl++ = paddr & ~page_mask;
3563 		else if ((paddr & page_mask) == 0)
3564 			*pbl_tbl++ = paddr;
3565 	}
3566 	return pbl_tbl - pbl_tbl_orig;
3567 }
3568 
3569 /* uverbs */
3570 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3571 				  u64 virt_addr, int mr_access_flags,
3572 				  struct ib_udata *udata)
3573 {
3574 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3575 	struct bnxt_re_dev *rdev = pd->rdev;
3576 	struct bnxt_re_mr *mr;
3577 	struct ib_umem *umem;
3578 	u64 *pbl_tbl = NULL;
3579 	int umem_pgs, page_shift, rc;
3580 
3581 	if (length > BNXT_RE_MAX_MR_SIZE) {
3582 		dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n",
3583 			length, BNXT_RE_MAX_MR_SIZE);
3584 		return ERR_PTR(-ENOMEM);
3585 	}
3586 
3587 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3588 	if (!mr)
3589 		return ERR_PTR(-ENOMEM);
3590 
3591 	mr->rdev = rdev;
3592 	mr->qplib_mr.pd = &pd->qplib_pd;
3593 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3594 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3595 
3596 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3597 	if (rc) {
3598 		dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3599 		goto free_mr;
3600 	}
3601 	/* The fixed portion of the rkey is the same as the lkey */
3602 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
3603 
3604 	umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
3605 	if (IS_ERR(umem)) {
3606 		dev_err(rdev_to_dev(rdev), "Failed to get umem");
3607 		rc = -EFAULT;
3608 		goto free_mrw;
3609 	}
3610 	mr->ib_umem = umem;
3611 
3612 	mr->qplib_mr.va = virt_addr;
3613 	umem_pgs = ib_umem_page_count(umem);
3614 	if (!umem_pgs) {
3615 		dev_err(rdev_to_dev(rdev), "umem is invalid!");
3616 		rc = -EINVAL;
3617 		goto free_umem;
3618 	}
3619 	mr->qplib_mr.total_size = length;
3620 
3621 	pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
3622 	if (!pbl_tbl) {
3623 		rc = -ENOMEM;
3624 		goto free_umem;
3625 	}
3626 
3627 	page_shift = PAGE_SHIFT;
3628 
3629 	if (!bnxt_re_page_size_ok(page_shift)) {
3630 		dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
3631 		rc = -EFAULT;
3632 		goto fail;
3633 	}
3634 
3635 	if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
3636 		dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
3637 			length,	(u64)BNXT_RE_MAX_MR_SIZE_LOW);
3638 		rc = -EINVAL;
3639 		goto fail;
3640 	}
3641 	if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
3642 		page_shift = BNXT_RE_PAGE_SHIFT_2M;
3643 		dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
3644 			 1 << page_shift);
3645 	}
3646 
3647 	/* Map umem buf ptrs to the PBL */
3648 	umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
3649 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
3650 			       umem_pgs, false, 1 << page_shift);
3651 	if (rc) {
3652 		dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3653 		goto fail;
3654 	}
3655 
3656 	kfree(pbl_tbl);
3657 
3658 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
3659 	mr->ib_mr.rkey = mr->qplib_mr.lkey;
3660 	atomic_inc(&rdev->mr_count);
3661 
3662 	return &mr->ib_mr;
3663 fail:
3664 	kfree(pbl_tbl);
3665 free_umem:
3666 	ib_umem_release(umem);
3667 free_mrw:
3668 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3669 free_mr:
3670 	kfree(mr);
3671 	return ERR_PTR(rc);
3672 }
3673 
3674 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
3675 {
3676 	struct ib_device *ibdev = ctx->device;
3677 	struct bnxt_re_ucontext *uctx =
3678 		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
3679 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3680 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3681 	struct bnxt_re_uctx_resp resp;
3682 	u32 chip_met_rev_num = 0;
3683 	int rc;
3684 
3685 	dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3686 		ibdev->uverbs_abi_ver);
3687 
3688 	if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3689 		dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
3690 			BNXT_RE_ABI_VERSION);
3691 		return -EPERM;
3692 	}
3693 
3694 	uctx->rdev = rdev;
3695 
3696 	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3697 	if (!uctx->shpg) {
3698 		rc = -ENOMEM;
3699 		goto fail;
3700 	}
3701 	spin_lock_init(&uctx->sh_lock);
3702 
3703 	resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
3704 	chip_met_rev_num = rdev->chip_ctx.chip_num;
3705 	chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_rev & 0xFF) <<
3706 			     BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
3707 	chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_metal & 0xFF) <<
3708 			     BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
3709 	resp.chip_id0 = chip_met_rev_num;
3710 	/* Future extension of chip info */
3711 	resp.chip_id1 = 0;
3712 	/*Temp, Use idr_alloc instead */
3713 	resp.dev_id = rdev->en_dev->pdev->devfn;
3714 	resp.max_qp = rdev->qplib_ctx.qpc_count;
3715 	resp.pg_size = PAGE_SIZE;
3716 	resp.cqe_sz = sizeof(struct cq_base);
3717 	resp.max_cqd = dev_attr->max_cq_wqes;
3718 	resp.rsvd    = 0;
3719 
3720 	rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
3721 	if (rc) {
3722 		dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3723 		rc = -EFAULT;
3724 		goto cfail;
3725 	}
3726 
3727 	return 0;
3728 cfail:
3729 	free_page((unsigned long)uctx->shpg);
3730 	uctx->shpg = NULL;
3731 fail:
3732 	return rc;
3733 }
3734 
3735 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3736 {
3737 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3738 						   struct bnxt_re_ucontext,
3739 						   ib_uctx);
3740 
3741 	struct bnxt_re_dev *rdev = uctx->rdev;
3742 
3743 	if (uctx->shpg)
3744 		free_page((unsigned long)uctx->shpg);
3745 
3746 	if (uctx->dpi.dbr) {
3747 		/* Free DPI only if this is the first PD allocated by the
3748 		 * application and mark the context dpi as NULL
3749 		 */
3750 		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3751 				       &rdev->qplib_res.dpi_tbl, &uctx->dpi);
3752 		uctx->dpi.dbr = NULL;
3753 	}
3754 }
3755 
3756 /* Helper function to mmap the virtual memory from user app */
3757 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3758 {
3759 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3760 						   struct bnxt_re_ucontext,
3761 						   ib_uctx);
3762 	struct bnxt_re_dev *rdev = uctx->rdev;
3763 	u64 pfn;
3764 
3765 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3766 		return -EINVAL;
3767 
3768 	if (vma->vm_pgoff) {
3769 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3770 		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3771 				       PAGE_SIZE, vma->vm_page_prot)) {
3772 			dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3773 			return -EAGAIN;
3774 		}
3775 	} else {
3776 		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3777 		if (remap_pfn_range(vma, vma->vm_start,
3778 				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
3779 			dev_err(rdev_to_dev(rdev),
3780 				"Failed to map shared page");
3781 			return -EAGAIN;
3782 		}
3783 	}
3784 
3785 	return 0;
3786 }
3787