xref: /openbmc/linux/drivers/infiniband/hw/qedr/verbs.c (revision 0b26ca68)
1 /* QLogic qedr NIC Driver
2  * Copyright (c) 2015-2016  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and /or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
34 #include <net/ip.h>
35 #include <net/ipv6.h>
36 #include <net/udp.h>
37 #include <linux/iommu.h>
38 
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
45 #include <rdma/uverbs_ioctl.h>
46 
47 #include <linux/qed/common_hsi.h>
48 #include "qedr_hsi_rdma.h"
49 #include <linux/qed/qed_if.h>
50 #include "qedr.h"
51 #include "verbs.h"
52 #include <rdma/qedr-abi.h>
53 #include "qedr_roce_cm.h"
54 #include "qedr_iw_cm.h"
55 
56 #define QEDR_SRQ_WQE_ELEM_SIZE	sizeof(union rdma_srq_elm)
57 #define	RDMA_MAX_SGE_PER_SRQ	(4)
58 #define RDMA_MAX_SRQ_WQE_SIZE	(RDMA_MAX_SGE_PER_SRQ + 1)
59 
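/* Convert a DQ PWM doorbell offset into the offset added to the doorbell
 * BAR (kernel) or DPI base (user) when building a doorbell address, e.g.
 * db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
 */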
60 #define DB_ADDR_SHIFT(addr)		((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
61 
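/* Types of user mmap entries: the write-combined doorbell (DPI) region and
 * the kernel page used for doorbell recovery data.
 */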
62 enum {
63 	QEDR_USER_MMAP_IO_WC = 0,
64 	QEDR_USER_MMAP_PHYS_PAGE,
65 };
66 
67 static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
68 					size_t len)
69 {
70 	size_t min_len = min_t(size_t, len, udata->outlen);
71 
72 	return ib_copy_to_udata(udata, src, min_len);
73 }
74 
75 int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
76 {
77 	if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
78 		return -EINVAL;
79 
80 	*pkey = QEDR_ROCE_PKEY_DEFAULT;
81 	return 0;
82 }
83 
84 int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
85 		      int index, union ib_gid *sgid)
86 {
87 	struct qedr_dev *dev = get_qedr_dev(ibdev);
88 
89 	memset(sgid->raw, 0, sizeof(sgid->raw));
90 	ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
91 
92 	DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
93 		 sgid->global.interface_id, sgid->global.subnet_prefix);
94 
95 	return 0;
96 }
97 
98 int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
99 {
100 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
101 	struct qedr_device_attr *qattr = &dev->attr;
102 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
103 
104 	srq_attr->srq_limit = srq->srq_limit;
105 	srq_attr->max_wr = qattr->max_srq_wr;
106 	srq_attr->max_sge = qattr->max_sge;
107 
108 	return 0;
109 }
110 
111 int qedr_query_device(struct ib_device *ibdev,
112 		      struct ib_device_attr *attr, struct ib_udata *udata)
113 {
114 	struct qedr_dev *dev = get_qedr_dev(ibdev);
115 	struct qedr_device_attr *qattr = &dev->attr;
116 
117 	if (!dev->rdma_ctx) {
118 		DP_ERR(dev,
119 		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
120 		       dev->rdma_ctx);
121 		return -EINVAL;
122 	}
123 
124 	memset(attr, 0, sizeof(*attr));
125 
126 	attr->fw_ver = qattr->fw_ver;
127 	attr->sys_image_guid = qattr->sys_image_guid;
128 	attr->max_mr_size = qattr->max_mr_size;
129 	attr->page_size_cap = qattr->page_size_caps;
130 	attr->vendor_id = qattr->vendor_id;
131 	attr->vendor_part_id = qattr->vendor_part_id;
132 	attr->hw_ver = qattr->hw_ver;
133 	attr->max_qp = qattr->max_qp;
134 	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
135 	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
136 	    IB_DEVICE_RC_RNR_NAK_GEN |
137 	    IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
138 
139 	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
140 		attr->device_cap_flags |= IB_DEVICE_XRC;
141 	attr->max_send_sge = qattr->max_sge;
142 	attr->max_recv_sge = qattr->max_sge;
143 	attr->max_sge_rd = qattr->max_sge;
144 	attr->max_cq = qattr->max_cq;
145 	attr->max_cqe = qattr->max_cqe;
146 	attr->max_mr = qattr->max_mr;
147 	attr->max_mw = qattr->max_mw;
148 	attr->max_pd = qattr->max_pd;
149 	attr->atomic_cap = dev->atomic_cap;
150 	attr->max_qp_init_rd_atom =
151 	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
152 	attr->max_qp_rd_atom =
153 	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
154 		attr->max_qp_init_rd_atom);
155 
156 	attr->max_srq = qattr->max_srq;
157 	attr->max_srq_sge = qattr->max_srq_sge;
158 	attr->max_srq_wr = qattr->max_srq_wr;
159 
160 	attr->local_ca_ack_delay = qattr->dev_ack_delay;
161 	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
162 	attr->max_pkeys = qattr->max_pkey;
163 	attr->max_ah = qattr->max_ah;
164 
165 	return 0;
166 }
167 
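/* Map the Ethernet link speed reported by qed (in Mbps) to the closest
 * InfiniBand speed/width pair advertised through ib_port_attr.
 */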
168 static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
169 					    u8 *ib_width)
170 {
171 	switch (speed) {
172 	case 1000:
173 		*ib_speed = IB_SPEED_SDR;
174 		*ib_width = IB_WIDTH_1X;
175 		break;
176 	case 10000:
177 		*ib_speed = IB_SPEED_QDR;
178 		*ib_width = IB_WIDTH_1X;
179 		break;
180 
181 	case 20000:
182 		*ib_speed = IB_SPEED_DDR;
183 		*ib_width = IB_WIDTH_4X;
184 		break;
185 
186 	case 25000:
187 		*ib_speed = IB_SPEED_EDR;
188 		*ib_width = IB_WIDTH_1X;
189 		break;
190 
191 	case 40000:
192 		*ib_speed = IB_SPEED_QDR;
193 		*ib_width = IB_WIDTH_4X;
194 		break;
195 
196 	case 50000:
197 		*ib_speed = IB_SPEED_HDR;
198 		*ib_width = IB_WIDTH_1X;
199 		break;
200 
201 	case 100000:
202 		*ib_speed = IB_SPEED_EDR;
203 		*ib_width = IB_WIDTH_4X;
204 		break;
205 
206 	default:
207 		/* Unsupported */
208 		*ib_speed = IB_SPEED_SDR;
209 		*ib_width = IB_WIDTH_1X;
210 	}
211 }
212 
213 int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
214 {
215 	struct qedr_dev *dev;
216 	struct qed_rdma_port *rdma_port;
217 
218 	dev = get_qedr_dev(ibdev);
219 
220 	if (!dev->rdma_ctx) {
221 		DP_ERR(dev, "rdma_ctx is NULL\n");
222 		return -EINVAL;
223 	}
224 
225 	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
226 
227 	/* *attr being zeroed by the caller, avoid zeroing it here */
228 	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
229 		attr->state = IB_PORT_ACTIVE;
230 		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
231 	} else {
232 		attr->state = IB_PORT_DOWN;
233 		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
234 	}
235 	attr->max_mtu = IB_MTU_4096;
236 	attr->lid = 0;
237 	attr->lmc = 0;
238 	attr->sm_lid = 0;
239 	attr->sm_sl = 0;
240 	attr->ip_gids = true;
241 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
242 		attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
243 		attr->gid_tbl_len = 1;
244 	} else {
245 		attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
246 		attr->gid_tbl_len = QEDR_MAX_SGID;
247 		attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
248 	}
249 	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
250 	attr->qkey_viol_cntr = 0;
251 	get_link_speed_and_width(rdma_port->link_speed,
252 				 &attr->active_speed, &attr->active_width);
253 	attr->max_msg_sz = rdma_port->max_msg_size;
254 	attr->max_vl_num = 4;
255 
256 	return 0;
257 }
258 
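/* Allocate a per-process DPI (doorbell page) from qed, expose it to user
 * space via an rdma_user_mmap entry and return the device limits the user
 * library needs in the alloc_ucontext response.
 */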
259 int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
260 {
261 	struct ib_device *ibdev = uctx->device;
262 	int rc;
263 	struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
264 	struct qedr_alloc_ucontext_resp uresp = {};
265 	struct qedr_alloc_ucontext_req ureq = {};
266 	struct qedr_dev *dev = get_qedr_dev(ibdev);
267 	struct qed_rdma_add_user_out_params oparams;
268 	struct qedr_user_mmap_entry *entry;
269 
270 	if (!udata)
271 		return -EFAULT;
272 
273 	if (udata->inlen) {
274 		rc = ib_copy_from_udata(&ureq, udata,
275 					min(sizeof(ureq), udata->inlen));
276 		if (rc) {
277 			DP_ERR(dev, "Problem copying data from user space\n");
278 			return -EFAULT;
279 		}
280 		ctx->edpm_mode = !!(ureq.context_flags &
281 				    QEDR_ALLOC_UCTX_EDPM_MODE);
282 		ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
283 	}
284 
285 	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
286 	if (rc) {
287 		DP_ERR(dev,
288 		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size or closing unnecessary RoCE applications. To increase the number of DPIs, consult the qedr readme\n",
289 		       rc);
290 		return rc;
291 	}
292 
293 	ctx->dpi = oparams.dpi;
294 	ctx->dpi_addr = oparams.dpi_addr;
295 	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
296 	ctx->dpi_size = oparams.dpi_size;
297 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
298 	if (!entry) {
299 		rc = -ENOMEM;
300 		goto err;
301 	}
302 
303 	entry->io_address = ctx->dpi_phys_addr;
304 	entry->length = ctx->dpi_size;
305 	entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
306 	entry->dpi = ctx->dpi;
307 	entry->dev = dev;
308 	rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
309 					 ctx->dpi_size);
310 	if (rc) {
311 		kfree(entry);
312 		goto err;
313 	}
314 	ctx->db_mmap_entry = &entry->rdma_entry;
315 
316 	if (!dev->user_dpm_enabled)
317 		uresp.dpm_flags = 0;
318 	else if (rdma_protocol_iwarp(&dev->ibdev, 1))
319 		uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
320 	else
321 		uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
322 				  QEDR_DPM_TYPE_ROCE_LEGACY |
323 				  QEDR_DPM_TYPE_ROCE_EDPM_MODE;
324 
325 	if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) {
326 		uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
327 		uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
328 		uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
329 		uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE;
330 	}
331 
332 	uresp.wids_enabled = 1;
333 	uresp.wid_count = oparams.wid_count;
334 	uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
335 	uresp.db_size = ctx->dpi_size;
336 	uresp.max_send_wr = dev->attr.max_sqe;
337 	uresp.max_recv_wr = dev->attr.max_rqe;
338 	uresp.max_srq_wr = dev->attr.max_srq_wr;
339 	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
340 	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
341 	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
342 	uresp.max_cqes = QEDR_MAX_CQES;
343 
344 	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
345 	if (rc)
346 		goto err;
347 
348 	ctx->dev = dev;
349 
350 	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
351 		 &ctx->ibucontext);
352 	return 0;
353 
354 err:
355 	if (!ctx->db_mmap_entry)
356 		dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
357 	else
358 		rdma_user_mmap_entry_remove(ctx->db_mmap_entry);
359 
360 	return rc;
361 }
362 
363 void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
364 {
365 	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
366 
367 	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
368 		 uctx);
369 
370 	rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
371 }
372 
373 void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
374 {
375 	struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
376 	struct qedr_dev *dev = entry->dev;
377 
378 	if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
379 		free_page((unsigned long)entry->address);
380 	else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
381 		dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);
382 
383 	kfree(entry);
384 }
385 
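/* mmap dispatcher: look up the rdma_user_mmap entry behind vm_pgoff and map
 * either the write-combined doorbell region or a doorbell recovery page.
 */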
386 int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
387 {
388 	struct ib_device *dev = ucontext->device;
389 	size_t length = vma->vm_end - vma->vm_start;
390 	struct rdma_user_mmap_entry *rdma_entry;
391 	struct qedr_user_mmap_entry *entry;
392 	int rc = 0;
393 	u64 pfn;
394 
395 	ibdev_dbg(dev,
396 		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
397 		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
398 
399 	rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
400 	if (!rdma_entry) {
401 		ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
402 			  vma->vm_pgoff);
403 		return -EINVAL;
404 	}
405 	entry = get_qedr_mmap_entry(rdma_entry);
406 	ibdev_dbg(dev,
407 		  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
408 		  entry->io_address, length, entry->mmap_flag);
409 
410 	switch (entry->mmap_flag) {
411 	case QEDR_USER_MMAP_IO_WC:
412 		pfn = entry->io_address >> PAGE_SHIFT;
413 		rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
414 				       pgprot_writecombine(vma->vm_page_prot),
415 				       rdma_entry);
416 		break;
417 	case QEDR_USER_MMAP_PHYS_PAGE:
418 		rc = vm_insert_page(vma, vma->vm_start,
419 				    virt_to_page(entry->address));
420 		break;
421 	default:
422 		rc = -EINVAL;
423 	}
424 
425 	if (rc)
426 		ibdev_dbg(dev,
427 			  "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
428 			  entry->io_address, length, entry->mmap_flag, rc);
429 
430 	rdma_user_mmap_entry_put(rdma_entry);
431 	return rc;
432 }
433 
434 int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
435 {
436 	struct ib_device *ibdev = ibpd->device;
437 	struct qedr_dev *dev = get_qedr_dev(ibdev);
438 	struct qedr_pd *pd = get_qedr_pd(ibpd);
439 	u16 pd_id;
440 	int rc;
441 
442 	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
443 		 udata ? "User Lib" : "Kernel");
444 
445 	if (!dev->rdma_ctx) {
446 		DP_ERR(dev, "invalid RDMA context\n");
447 		return -EINVAL;
448 	}
449 
450 	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
451 	if (rc)
452 		return rc;
453 
454 	pd->pd_id = pd_id;
455 
456 	if (udata) {
457 		struct qedr_alloc_pd_uresp uresp = {
458 			.pd_id = pd_id,
459 		};
460 		struct qedr_ucontext *context = rdma_udata_to_drv_context(
461 			udata, struct qedr_ucontext, ibucontext);
462 
463 		rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
464 		if (rc) {
465 			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
466 			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
467 			return rc;
468 		}
469 
470 		pd->uctx = context;
471 		pd->uctx->pd = pd;
472 	}
473 
474 	return 0;
475 }
476 
477 int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
478 {
479 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
480 	struct qedr_pd *pd = get_qedr_pd(ibpd);
481 
482 	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
483 	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
484 	return 0;
485 }
486 
487 
488 int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
489 {
490 	struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
491 	struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd);
492 
493 	return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
494 }
495 
496 int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
497 {
498 	struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
499 	u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;
500 
501 	dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
502 	return 0;
503 }
504 static void qedr_free_pbl(struct qedr_dev *dev,
505 			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
506 {
507 	struct pci_dev *pdev = dev->pdev;
508 	int i;
509 
510 	for (i = 0; i < pbl_info->num_pbls; i++) {
511 		if (!pbl[i].va)
512 			continue;
513 		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
514 				  pbl[i].va, pbl[i].pa);
515 	}
516 
517 	kfree(pbl);
518 }
519 
520 #define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
521 #define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
522 
523 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
524 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
525 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
526 
527 static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
528 					   struct qedr_pbl_info *pbl_info,
529 					   gfp_t flags)
530 {
531 	struct pci_dev *pdev = dev->pdev;
532 	struct qedr_pbl *pbl_table;
533 	dma_addr_t *pbl_main_tbl;
534 	dma_addr_t pa;
535 	void *va;
536 	int i;
537 
538 	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
539 	if (!pbl_table)
540 		return ERR_PTR(-ENOMEM);
541 
542 	for (i = 0; i < pbl_info->num_pbls; i++) {
543 		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
544 					flags);
545 		if (!va)
546 			goto err;
547 
548 		pbl_table[i].va = va;
549 		pbl_table[i].pa = pa;
550 	}
551 
552 	/* Two-layer PBLs: if we have more than one PBL, we need to initialize
553 	 * the first one with physical pointers to all of the rest.
554 	 */
555 	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
556 	for (i = 0; i < pbl_info->num_pbls - 1; i++)
557 		pbl_main_tbl[i] = pbl_table[i + 1].pa;
558 
559 	return pbl_table;
560 
561 err:
562 	for (i--; i >= 0; i--)
563 		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
564 				  pbl_table[i].va, pbl_table[i].pa);
565 
566 	qedr_free_pbl(dev, pbl_info, pbl_table);
567 
568 	return ERR_PTR(-ENOMEM);
569 }
570 
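/* Decide between a single- and a two-layer PBL and pick the FW PBL page
 * size. With 8-byte PBEs a 4K PBL page holds 512 entries and a 64K page
 * holds 8192, so anything above MAX_PBES_ON_PAGE entries requires the
 * two-layer layout, where layer 0 only holds pointers to the leaf PBLs.
 */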
571 static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
572 				struct qedr_pbl_info *pbl_info,
573 				u32 num_pbes, int two_layer_capable)
574 {
575 	u32 pbl_capacity;
576 	u32 pbl_size;
577 	u32 num_pbls;
578 
579 	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
580 		if (num_pbes > MAX_PBES_TWO_LAYER) {
581 			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
582 			       num_pbes);
583 			return -EINVAL;
584 		}
585 
586 		/* calculate required pbl page size */
587 		pbl_size = MIN_FW_PBL_PAGE_SIZE;
588 		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
589 			       NUM_PBES_ON_PAGE(pbl_size);
590 
591 		while (pbl_capacity < num_pbes) {
592 			pbl_size *= 2;
593 			pbl_capacity = pbl_size / sizeof(u64);
594 			pbl_capacity = pbl_capacity * pbl_capacity;
595 		}
596 
597 		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
598 		num_pbls++;	/* One for layer 0 (points to the leaf PBLs) */
599 		pbl_info->two_layered = true;
600 	} else {
601 		/* One layered PBL */
602 		num_pbls = 1;
603 		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
604 				 roundup_pow_of_two((num_pbes * sizeof(u64))));
605 		pbl_info->two_layered = false;
606 	}
607 
608 	pbl_info->num_pbls = num_pbls;
609 	pbl_info->pbl_size = pbl_size;
610 	pbl_info->num_pbes = num_pbes;
611 
612 	DP_DEBUG(dev, QEDR_MSG_MR,
613 		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
614 		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
615 
616 	return 0;
617 }
618 
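/* Walk the umem in pg_shift-sized DMA blocks and write each block address
 * as a little-endian PBE, advancing to the next PBL page whenever the
 * current one fills up.
 */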
619 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
620 			       struct qedr_pbl *pbl,
621 			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
622 {
623 	int pbe_cnt, total_num_pbes = 0;
624 	struct qedr_pbl *pbl_tbl;
625 	struct ib_block_iter biter;
626 	struct regpair *pbe;
627 
628 	if (!pbl_info->num_pbes)
629 		return;
630 
631 	/* If we have a two-layered PBL, the first PBL points to the rest of
632 	 * the PBLs and the first data entry lies in the second PBL of the table.
633 	 */
634 	if (pbl_info->two_layered)
635 		pbl_tbl = &pbl[1];
636 	else
637 		pbl_tbl = pbl;
638 
639 	pbe = (struct regpair *)pbl_tbl->va;
640 	if (!pbe) {
641 		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
642 		return;
643 	}
644 
645 	pbe_cnt = 0;
646 
647 	rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
648 		u64 pg_addr = rdma_block_iter_dma_address(&biter);
649 
650 		pbe->lo = cpu_to_le32(pg_addr);
651 		pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
652 
653 		pbe_cnt++;
654 		total_num_pbes++;
655 		pbe++;
656 
657 		if (total_num_pbes == pbl_info->num_pbes)
658 			return;
659 
660 		/* If the given pbl is full storing the pbes, move to next pbl.
661 		 */
662 		if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
663 			pbl_tbl++;
664 			pbe = (struct regpair *)pbl_tbl->va;
665 			pbe_cnt = 0;
666 		}
667 	}
668 }
669 
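/* Register a doorbell address and its last-written data with the qed
 * doorbell recovery mechanism so the doorbell can be replayed after a
 * doorbell drop. A NULL db_data means the user library is too old to
 * provide recovery data, so registration is skipped.
 */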
670 static int qedr_db_recovery_add(struct qedr_dev *dev,
671 				void __iomem *db_addr,
672 				void *db_data,
673 				enum qed_db_rec_width db_width,
674 				enum qed_db_rec_space db_space)
675 {
676 	if (!db_data) {
677 		DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
678 		return 0;
679 	}
680 
681 	return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
682 						 db_width, db_space);
683 }
684 
685 static void qedr_db_recovery_del(struct qedr_dev *dev,
686 				 void __iomem *db_addr,
687 				 void *db_data)
688 {
689 	if (!db_data) {
690 		DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
691 		return;
692 	}
693 
694 	/* Ignore return code as there is not much we can do about it. Error
695 	 * log will be printed inside.
696 	 */
697 	dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
698 }
699 
700 static int qedr_copy_cq_uresp(struct qedr_dev *dev,
701 			      struct qedr_cq *cq, struct ib_udata *udata,
702 			      u32 db_offset)
703 {
704 	struct qedr_create_cq_uresp uresp;
705 	int rc;
706 
707 	memset(&uresp, 0, sizeof(uresp));
708 
709 	uresp.db_offset = db_offset;
710 	uresp.icid = cq->icid;
711 	if (cq->q.db_mmap_entry)
712 		uresp.db_rec_addr =
713 			rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
714 
715 	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
716 	if (rc)
717 		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
718 
719 	return rc;
720 }
721 
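/* Advance the CQ consumer. Consuming the last element of the chain flips
 * the expected toggle bit, which is how valid CQEs are recognized after the
 * chain wraps around.
 */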
722 static void consume_cqe(struct qedr_cq *cq)
723 {
724 	if (cq->latest_cqe == cq->toggle_cqe)
725 		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
726 
727 	cq->latest_cqe = qed_chain_consume(&cq->pbl);
728 }
729 
730 static inline int qedr_align_cq_entries(int entries)
731 {
732 	u64 size, aligned_size;
733 
734 	/* We allocate an extra entry that we don't report to the FW. */
735 	size = (entries + 1) * QEDR_CQE_SIZE;
736 	aligned_size = ALIGN(size, PAGE_SIZE);
737 
738 	return aligned_size / QEDR_CQE_SIZE;
739 }
740 
741 static int qedr_init_user_db_rec(struct ib_udata *udata,
742 				 struct qedr_dev *dev, struct qedr_userq *q,
743 				 bool requires_db_rec)
744 {
745 	struct qedr_ucontext *uctx =
746 		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
747 					  ibucontext);
748 	struct qedr_user_mmap_entry *entry;
749 	int rc;
750 
751 	/* Abort for a non-doorbell user queue (SRQ) or a non-supporting lib */
752 	if (requires_db_rec == 0 || !uctx->db_rec)
753 		return 0;
754 
755 	/* Allocate a page for doorbell recovery, add to mmap */
756 	q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
757 	if (!q->db_rec_data) {
758 		DP_ERR(dev, "get_zeroed_page failed\n");
759 		return -ENOMEM;
760 	}
761 
762 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
763 	if (!entry)
764 		goto err_free_db_data;
765 
766 	entry->address = q->db_rec_data;
767 	entry->length = PAGE_SIZE;
768 	entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
769 	rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
770 					 &entry->rdma_entry,
771 					 PAGE_SIZE);
772 	if (rc)
773 		goto err_free_entry;
774 
775 	q->db_mmap_entry = &entry->rdma_entry;
776 
777 	return 0;
778 
779 err_free_entry:
780 	kfree(entry);
781 
782 err_free_db_data:
783 	free_page((unsigned long)q->db_rec_data);
784 	q->db_rec_data = NULL;
785 	return -ENOMEM;
786 }
787 
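/* Pin the user queue buffer, build the PBL describing it for the FW and,
 * when required and supported by the library, set up the doorbell recovery
 * page for this queue.
 */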
788 static inline int qedr_init_user_queue(struct ib_udata *udata,
789 				       struct qedr_dev *dev,
790 				       struct qedr_userq *q, u64 buf_addr,
791 				       size_t buf_len, bool requires_db_rec,
792 				       int access,
793 				       int alloc_and_init)
794 {
795 	u32 fw_pages;
796 	int rc;
797 
798 	q->buf_addr = buf_addr;
799 	q->buf_len = buf_len;
800 	q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
801 	if (IS_ERR(q->umem)) {
802 		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
803 		       PTR_ERR(q->umem));
804 		return PTR_ERR(q->umem);
805 	}
806 
807 	fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
808 	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
809 	if (rc)
810 		goto err0;
811 
812 	if (alloc_and_init) {
813 		q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
814 		if (IS_ERR(q->pbl_tbl)) {
815 			rc = PTR_ERR(q->pbl_tbl);
816 			goto err0;
817 		}
818 		qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
819 				   FW_PAGE_SHIFT);
820 	} else {
821 		q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
822 		if (!q->pbl_tbl) {
823 			rc = -ENOMEM;
824 			goto err0;
825 		}
826 	}
827 
828 	/* mmap the user address used to store doorbell data for recovery */
829 	return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
830 
831 err0:
832 	ib_umem_release(q->umem);
833 	q->umem = NULL;
834 
835 	return rc;
836 }
837 
838 static inline void qedr_init_cq_params(struct qedr_cq *cq,
839 				       struct qedr_ucontext *ctx,
840 				       struct qedr_dev *dev, int vector,
841 				       int chain_entries, int page_cnt,
842 				       u64 pbl_ptr,
843 				       struct qed_rdma_create_cq_in_params
844 				       *params)
845 {
846 	memset(params, 0, sizeof(*params));
847 	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
848 	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
849 	params->cnq_id = vector;
850 	params->cq_size = chain_entries - 1;
851 	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
852 	params->pbl_num_pages = page_cnt;
853 	params->pbl_ptr = pbl_ptr;
854 	params->pbl_two_level = 0;
855 }
856 
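/* Ring the CQ doorbell with the given consumer index and arm flags. */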
857 static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
858 {
859 	cq->db.data.agg_flags = flags;
860 	cq->db.data.value = cpu_to_le32(cons);
861 	writeq(cq->db.raw, cq->db_addr);
862 }
863 
864 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
865 {
866 	struct qedr_cq *cq = get_qedr_cq(ibcq);
867 	unsigned long sflags;
868 	struct qedr_dev *dev;
869 
870 	dev = get_qedr_dev(ibcq->device);
871 
872 	if (cq->destroyed) {
873 		DP_ERR(dev,
874 		       "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
875 		       cq, cq->icid);
876 		return -EINVAL;
877 	}
878 
879 
880 	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
881 		return 0;
882 
883 	spin_lock_irqsave(&cq->cq_lock, sflags);
884 
885 	cq->arm_flags = 0;
886 
887 	if (flags & IB_CQ_SOLICITED)
888 		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
889 
890 	if (flags & IB_CQ_NEXT_COMP)
891 		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
892 
893 	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
894 
895 	spin_unlock_irqrestore(&cq->cq_lock, sflags);
896 
897 	return 0;
898 }
899 
900 int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
901 		   struct ib_udata *udata)
902 {
903 	struct ib_device *ibdev = ibcq->device;
904 	struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
905 		udata, struct qedr_ucontext, ibucontext);
906 	struct qed_rdma_destroy_cq_out_params destroy_oparams;
907 	struct qed_rdma_destroy_cq_in_params destroy_iparams;
908 	struct qed_chain_init_params chain_params = {
909 		.mode		= QED_CHAIN_MODE_PBL,
910 		.intended_use	= QED_CHAIN_USE_TO_CONSUME,
911 		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
912 		.elem_size	= sizeof(union rdma_cqe),
913 	};
914 	struct qedr_dev *dev = get_qedr_dev(ibdev);
915 	struct qed_rdma_create_cq_in_params params;
916 	struct qedr_create_cq_ureq ureq = {};
917 	int vector = attr->comp_vector;
918 	int entries = attr->cqe;
919 	struct qedr_cq *cq = get_qedr_cq(ibcq);
920 	int chain_entries;
921 	u32 db_offset;
922 	int page_cnt;
923 	u64 pbl_ptr;
924 	u16 icid;
925 	int rc;
926 
927 	DP_DEBUG(dev, QEDR_MSG_INIT,
928 		 "create_cq: called from %s. entries=%d, vector=%d\n",
929 		 udata ? "User Lib" : "Kernel", entries, vector);
930 
931 	if (attr->flags)
932 		return -EOPNOTSUPP;
933 
934 	if (entries > QEDR_MAX_CQES) {
935 		DP_ERR(dev,
936 		       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
937 		       entries, QEDR_MAX_CQES);
938 		return -EINVAL;
939 	}
940 
941 	chain_entries = qedr_align_cq_entries(entries);
942 	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
943 	chain_params.num_elems = chain_entries;
944 
945 	/* calc db offset. user will add DPI base, kernel will add db addr */
946 	db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
947 
948 	if (udata) {
949 		if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
950 							 udata->inlen))) {
951 			DP_ERR(dev,
952 			       "create cq: problem copying data from user space\n");
953 			goto err0;
954 		}
955 
956 		if (!ureq.len) {
957 			DP_ERR(dev,
958 			       "create cq: cannot create a cq with 0 entries\n");
959 			goto err0;
960 		}
961 
962 		cq->cq_type = QEDR_CQ_TYPE_USER;
963 
964 		rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
965 					  ureq.len, true, IB_ACCESS_LOCAL_WRITE,
966 					  1);
967 		if (rc)
968 			goto err0;
969 
970 		pbl_ptr = cq->q.pbl_tbl->pa;
971 		page_cnt = cq->q.pbl_info.num_pbes;
972 
973 		cq->ibcq.cqe = chain_entries;
974 		cq->q.db_addr = ctx->dpi_addr + db_offset;
975 	} else {
976 		cq->cq_type = QEDR_CQ_TYPE_KERNEL;
977 
978 		rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
979 						   &chain_params);
980 		if (rc)
981 			goto err0;
982 
983 		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
984 		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
985 		cq->ibcq.cqe = cq->pbl.capacity;
986 	}
987 
988 	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
989 			    pbl_ptr, &params);
990 
991 	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
992 	if (rc)
993 		goto err1;
994 
995 	cq->icid = icid;
996 	cq->sig = QEDR_CQ_MAGIC_NUMBER;
997 	spin_lock_init(&cq->cq_lock);
998 
999 	if (udata) {
1000 		rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
1001 		if (rc)
1002 			goto err2;
1003 
1004 		rc = qedr_db_recovery_add(dev, cq->q.db_addr,
1005 					  &cq->q.db_rec_data->db_data,
1006 					  DB_REC_WIDTH_64B,
1007 					  DB_REC_USER);
1008 		if (rc)
1009 			goto err2;
1010 
1011 	} else {
1012 		/* Generate doorbell address. */
1013 		cq->db.data.icid = cq->icid;
1014 		cq->db_addr = dev->db_addr + db_offset;
1015 		cq->db.data.params = DB_AGG_CMD_MAX <<
1016 		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
1017 
1018 		/* Point to the very last element; once we pass it we toggle */
1019 		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
1020 		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
1021 		cq->latest_cqe = NULL;
1022 		consume_cqe(cq);
1023 		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
1024 
1025 		rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
1026 					  DB_REC_WIDTH_64B, DB_REC_KERNEL);
1027 		if (rc)
1028 			goto err2;
1029 	}
1030 
1031 	DP_DEBUG(dev, QEDR_MSG_CQ,
1032 		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
1033 		 cq->icid, cq, params.cq_size);
1034 
1035 	return 0;
1036 
1037 err2:
1038 	destroy_iparams.icid = cq->icid;
1039 	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
1040 				  &destroy_oparams);
1041 err1:
1042 	if (udata) {
1043 		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1044 		ib_umem_release(cq->q.umem);
1045 		if (cq->q.db_mmap_entry)
1046 			rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1047 	} else {
1048 		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1049 	}
1050 err0:
1051 	return -EINVAL;
1052 }
1053 
1054 int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
1055 {
1056 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1057 	struct qedr_cq *cq = get_qedr_cq(ibcq);
1058 
1059 	DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
1060 
1061 	return 0;
1062 }
1063 
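/* Bounded wait used by destroy CQ: first a short udelay() busy-wait loop,
 * then an msleep() loop, each limited to QEDR_DESTROY_CQ_MAX_ITERATIONS
 * iterations of QEDR_DESTROY_CQ_ITER_DURATION.
 */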
1064 #define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
1065 #define QEDR_DESTROY_CQ_ITER_DURATION		(10)
1066 
1067 int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1068 {
1069 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1070 	struct qed_rdma_destroy_cq_out_params oparams;
1071 	struct qed_rdma_destroy_cq_in_params iparams;
1072 	struct qedr_cq *cq = get_qedr_cq(ibcq);
1073 	int iter;
1074 
1075 	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
1076 
1077 	cq->destroyed = 1;
1078 
1079 	/* GSI CQs are handled by the driver, so they don't exist in the FW */
1080 	if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
1081 		qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1082 		return 0;
1083 	}
1084 
1085 	iparams.icid = cq->icid;
1086 	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1087 	dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1088 
1089 	if (udata) {
1090 		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1091 		ib_umem_release(cq->q.umem);
1092 
1093 		if (cq->q.db_rec_data) {
1094 			qedr_db_recovery_del(dev, cq->q.db_addr,
1095 					     &cq->q.db_rec_data->db_data);
1096 			rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1097 		}
1098 	} else {
1099 		qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1100 	}
1101 
1102 	/* We don't want the IRQ handler to handle a non-existing CQ so we
1103 	 * wait until all CNQ interrupts, if any, are received. This will always
1104 	 * happen and will always happen very fast. If not, then a serious error
1105 	 * has occurred. That is why we can use a long delay.
1106 	 * We spin for a short time so we don't lose time on context switching
1107 	 * in case all the completions are handled in that span. Otherwise
1108 	 * we sleep for a while and check again. Since the CNQ may be
1109 	 * associated with (only) the current CPU we use msleep to allow the
1110 	 * current CPU to be freed.
1111 	 * The CNQ notification counter is incremented in qedr_irq_handler().
1112 	 */
1113 	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1114 	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1115 		udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1116 		iter--;
1117 	}
1118 
1119 	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1120 	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1121 		msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1122 		iter--;
1123 	}
1124 
1125 	/* Note that we don't need to have explicit code to wait for the
1126 	 * completion of the event handler because it is invoked from the EQ.
1127 	 * Since the destroy CQ ramrod has also been received on the EQ we can
1128 	 * be certain that there's no event handler in process.
1129 	 */
1130 	return 0;
1131 }
1132 
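/* Fill the qed modify-QP parameters with the source/destination GIDs from
 * the AH attribute, deriving the RoCE mode (v1, v2/IPv4 or v2/IPv6) from
 * the network type of the source GID entry.
 */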
1133 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1134 					  struct ib_qp_attr *attr,
1135 					  int attr_mask,
1136 					  struct qed_rdma_modify_qp_in_params
1137 					  *qp_params)
1138 {
1139 	const struct ib_gid_attr *gid_attr;
1140 	enum rdma_network_type nw_type;
1141 	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1142 	u32 ipv4_addr;
1143 	int ret;
1144 	int i;
1145 
1146 	gid_attr = grh->sgid_attr;
1147 	ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
1148 	if (ret)
1149 		return ret;
1150 
1151 	nw_type = rdma_gid_attr_network_type(gid_attr);
1152 	switch (nw_type) {
1153 	case RDMA_NETWORK_IPV6:
1154 		memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1155 		       sizeof(qp_params->sgid));
1156 		memcpy(&qp_params->dgid.bytes[0],
1157 		       &grh->dgid,
1158 		       sizeof(qp_params->dgid));
1159 		qp_params->roce_mode = ROCE_V2_IPV6;
1160 		SET_FIELD(qp_params->modify_flags,
1161 			  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1162 		break;
1163 	case RDMA_NETWORK_ROCE_V1:
1164 		memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1165 		       sizeof(qp_params->sgid));
1166 		memcpy(&qp_params->dgid.bytes[0],
1167 		       &grh->dgid,
1168 		       sizeof(qp_params->dgid));
1169 		qp_params->roce_mode = ROCE_V1;
1170 		break;
1171 	case RDMA_NETWORK_IPV4:
1172 		memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1173 		memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1174 		ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
1175 		qp_params->sgid.ipv4_addr = ipv4_addr;
1176 		ipv4_addr =
1177 		    qedr_get_ipv4_from_gid(grh->dgid.raw);
1178 		qp_params->dgid.ipv4_addr = ipv4_addr;
1179 		SET_FIELD(qp_params->modify_flags,
1180 			  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1181 		qp_params->roce_mode = ROCE_V2_IPV4;
1182 		break;
1183 	default:
1184 		return -EINVAL;
1185 	}
1186 
1187 	for (i = 0; i < 4; i++) {
1188 		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1189 		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1190 	}
1191 
1192 	if (qp_params->vlan_id >= VLAN_CFI_MASK)
1193 		qp_params->vlan_id = 0;
1194 
1195 	return 0;
1196 }
1197 
1198 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1199 			       struct ib_qp_init_attr *attrs,
1200 			       struct ib_udata *udata)
1201 {
1202 	struct qedr_device_attr *qattr = &dev->attr;
1203 
1204 	/* QP0... attrs->qp_type == IB_QPT_GSI */
1205 	if (attrs->qp_type != IB_QPT_RC &&
1206 	    attrs->qp_type != IB_QPT_GSI &&
1207 	    attrs->qp_type != IB_QPT_XRC_INI &&
1208 	    attrs->qp_type != IB_QPT_XRC_TGT) {
1209 		DP_DEBUG(dev, QEDR_MSG_QP,
1210 			 "create qp: unsupported qp type=0x%x requested\n",
1211 			 attrs->qp_type);
1212 		return -EOPNOTSUPP;
1213 	}
1214 
1215 	if (attrs->cap.max_send_wr > qattr->max_sqe) {
1216 		DP_ERR(dev,
1217 		       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1218 		       attrs->cap.max_send_wr, qattr->max_sqe);
1219 		return -EINVAL;
1220 	}
1221 
1222 	if (attrs->cap.max_inline_data > qattr->max_inline) {
1223 		DP_ERR(dev,
1224 		       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1225 		       attrs->cap.max_inline_data, qattr->max_inline);
1226 		return -EINVAL;
1227 	}
1228 
1229 	if (attrs->cap.max_send_sge > qattr->max_sge) {
1230 		DP_ERR(dev,
1231 		       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1232 		       attrs->cap.max_send_sge, qattr->max_sge);
1233 		return -EINVAL;
1234 	}
1235 
1236 	if (attrs->cap.max_recv_sge > qattr->max_sge) {
1237 		DP_ERR(dev,
1238 		       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1239 		       attrs->cap.max_recv_sge, qattr->max_sge);
1240 		return -EINVAL;
1241 	}
1242 
1243 	/* verify consumer QPs are not trying to use GSI QP's CQ.
1244 	 * TGT QP isn't associated with RQ/SQ
1245 	 */
1246 	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
1247 	    (attrs->qp_type != IB_QPT_XRC_TGT)) {
1248 		struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
1249 		struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
1250 
1251 		if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
1252 		    (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
1253 			DP_ERR(dev,
1254 			       "create qp: consumer QP cannot use GSI CQs.\n");
1255 			return -EINVAL;
1256 		}
1257 	}
1258 
1259 	return 0;
1260 }
1261 
1262 static int qedr_copy_srq_uresp(struct qedr_dev *dev,
1263 			       struct qedr_srq *srq, struct ib_udata *udata)
1264 {
1265 	struct qedr_create_srq_uresp uresp = {};
1266 	int rc;
1267 
1268 	uresp.srq_id = srq->srq_id;
1269 
1270 	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1271 	if (rc)
1272 		DP_ERR(dev, "create srq: problem copying data to user space\n");
1273 
1274 	return rc;
1275 }
1276 
1277 static void qedr_copy_rq_uresp(struct qedr_dev *dev,
1278 			       struct qedr_create_qp_uresp *uresp,
1279 			       struct qedr_qp *qp)
1280 {
1281 	/* iWARP requires two doorbells per RQ. */
1282 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1283 		uresp->rq_db_offset =
1284 		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1285 		uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1286 	} else {
1287 		uresp->rq_db_offset =
1288 		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1289 	}
1290 
1291 	uresp->rq_icid = qp->icid;
1292 	if (qp->urq.db_mmap_entry)
1293 		uresp->rq_db_rec_addr =
1294 			rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
1295 }
1296 
1297 static void qedr_copy_sq_uresp(struct qedr_dev *dev,
1298 			       struct qedr_create_qp_uresp *uresp,
1299 			       struct qedr_qp *qp)
1300 {
1301 	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1302 
1303 	/* iWARP uses the same cid for rq and sq */
1304 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
1305 		uresp->sq_icid = qp->icid;
1306 	else
1307 		uresp->sq_icid = qp->icid + 1;
1308 
1309 	if (qp->usq.db_mmap_entry)
1310 		uresp->sq_db_rec_addr =
1311 			rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
1312 }
1313 
1314 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1315 			      struct qedr_qp *qp, struct ib_udata *udata,
1316 			      struct qedr_create_qp_uresp *uresp)
1317 {
1318 	int rc;
1319 
1320 	memset(uresp, 0, sizeof(*uresp));
1321 
1322 	if (qedr_qp_has_sq(qp))
1323 		qedr_copy_sq_uresp(dev, uresp, qp);
1324 
1325 	if (qedr_qp_has_rq(qp))
1326 		qedr_copy_rq_uresp(dev, uresp, qp);
1327 
1328 	uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1329 	uresp->qp_id = qp->qp_id;
1330 
1331 	rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
1332 	if (rc)
1333 		DP_ERR(dev,
1334 		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
1335 		       qp->icid);
1336 
1337 	return rc;
1338 }
1339 
1340 static void qedr_set_common_qp_params(struct qedr_dev *dev,
1341 				      struct qedr_qp *qp,
1342 				      struct qedr_pd *pd,
1343 				      struct ib_qp_init_attr *attrs)
1344 {
1345 	spin_lock_init(&qp->q_lock);
1346 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1347 		kref_init(&qp->refcnt);
1348 		init_completion(&qp->iwarp_cm_comp);
1349 	}
1350 
1351 	qp->pd = pd;
1352 	qp->qp_type = attrs->qp_type;
1353 	qp->max_inline_data = attrs->cap.max_inline_data;
1354 	qp->state = QED_ROCE_QP_STATE_RESET;
1355 	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1356 	qp->dev = dev;
1357 	if (qedr_qp_has_sq(qp)) {
1358 		qp->sq.max_sges = attrs->cap.max_send_sge;
1359 		qp->sq_cq = get_qedr_cq(attrs->send_cq);
1360 		DP_DEBUG(dev, QEDR_MSG_QP,
1361 			 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1362 			 qp->sq.max_sges, qp->sq_cq->icid);
1363 	}
1364 
1365 	if (attrs->srq)
1366 		qp->srq = get_qedr_srq(attrs->srq);
1367 
1368 	if (qedr_qp_has_rq(qp)) {
1369 		qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1370 		qp->rq.max_sges = attrs->cap.max_recv_sge;
1371 		DP_DEBUG(dev, QEDR_MSG_QP,
1372 			 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1373 			 qp->rq.max_sges, qp->rq_cq->icid);
1374 	}
1375 
1376 	DP_DEBUG(dev, QEDR_MSG_QP,
1377 		 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1378 		 pd->pd_id, qp->qp_type, qp->max_inline_data,
1379 		 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1380 	DP_DEBUG(dev, QEDR_MSG_QP,
1381 		 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1382 		 qp->sq.max_sges, qp->sq_cq->icid);
1383 }
1384 
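/* Kernel RoCE QP: derive the SQ/RQ doorbell addresses from the kernel
 * doorbell BAR and register them with doorbell recovery, undoing the SQ
 * registration if the RQ registration fails.
 */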
1385 static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1386 {
1387 	int rc = 0;
1388 
1389 	if (qedr_qp_has_sq(qp)) {
1390 		qp->sq.db = dev->db_addr +
1391 			    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1392 		qp->sq.db_data.data.icid = qp->icid + 1;
1393 		rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
1394 					  DB_REC_WIDTH_32B, DB_REC_KERNEL);
1395 		if (rc)
1396 			return rc;
1397 	}
1398 
1399 	if (qedr_qp_has_rq(qp)) {
1400 		qp->rq.db = dev->db_addr +
1401 			    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1402 		qp->rq.db_data.data.icid = qp->icid;
1403 		rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
1404 					  DB_REC_WIDTH_32B, DB_REC_KERNEL);
1405 		if (rc && qedr_qp_has_sq(qp))
1406 			qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
1407 	}
1408 
1409 	return rc;
1410 }
1411 
1412 static int qedr_check_srq_params(struct qedr_dev *dev,
1413 				 struct ib_srq_init_attr *attrs,
1414 				 struct ib_udata *udata)
1415 {
1416 	struct qedr_device_attr *qattr = &dev->attr;
1417 
1418 	if (attrs->attr.max_wr > qattr->max_srq_wr) {
1419 		DP_ERR(dev,
1420 		       "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
1421 		       attrs->attr.max_wr, qattr->max_srq_wr);
1422 		return -EINVAL;
1423 	}
1424 
1425 	if (attrs->attr.max_sge > qattr->max_sge) {
1426 		DP_ERR(dev,
1427 		       "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
1428 		       attrs->attr.max_sge, qattr->max_sge);
		return -EINVAL;
1429 	}
1430 
1431 	if (!udata && attrs->srq_type == IB_SRQT_XRC) {
1432 		DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
1433 		return -EINVAL;
1434 	}
1435 
1436 	return 0;
1437 }
1438 
1439 static void qedr_free_srq_user_params(struct qedr_srq *srq)
1440 {
1441 	qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1442 	ib_umem_release(srq->usrq.umem);
1443 	ib_umem_release(srq->prod_umem);
1444 }
1445 
1446 static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
1447 {
1448 	struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1449 	struct qedr_dev *dev = srq->dev;
1450 
1451 	dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
1452 
1453 	dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1454 			  hw_srq->virt_prod_pair_addr,
1455 			  hw_srq->phy_prod_pair_addr);
1456 }
1457 
1458 static int qedr_init_srq_user_params(struct ib_udata *udata,
1459 				     struct qedr_srq *srq,
1460 				     struct qedr_create_srq_ureq *ureq,
1461 				     int access)
1462 {
1463 	struct scatterlist *sg;
1464 	int rc;
1465 
1466 	rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
1467 				  ureq->srq_len, false, access, 1);
1468 	if (rc)
1469 		return rc;
1470 
1471 	srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
1472 				     sizeof(struct rdma_srq_producers), access);
1473 	if (IS_ERR(srq->prod_umem)) {
1474 		qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1475 		ib_umem_release(srq->usrq.umem);
1476 		DP_ERR(srq->dev,
1477 		       "create srq: failed ib_umem_get for producer, got %ld\n",
1478 		       PTR_ERR(srq->prod_umem));
1479 		return PTR_ERR(srq->prod_umem);
1480 	}
1481 
1482 	sg = srq->prod_umem->sg_head.sgl;
1483 	srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
1484 
1485 	return 0;
1486 }
1487 
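/* Kernel SRQ: allocate the producers structure from coherent DMA memory and
 * a PBL-mode qed chain sized for max_wr WQEs, each taking up to
 * RDMA_MAX_SRQ_WQE_SIZE chain elements (a header plus the SGEs).
 */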
1488 static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
1489 					struct qedr_dev *dev,
1490 					struct ib_srq_init_attr *init_attr)
1491 {
1492 	struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1493 	struct qed_chain_init_params params = {
1494 		.mode		= QED_CHAIN_MODE_PBL,
1495 		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1496 		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
1497 		.elem_size	= QEDR_SRQ_WQE_ELEM_SIZE,
1498 	};
1499 	dma_addr_t phy_prod_pair_addr;
1500 	u32 num_elems;
1501 	void *va;
1502 	int rc;
1503 
1504 	va = dma_alloc_coherent(&dev->pdev->dev,
1505 				sizeof(struct rdma_srq_producers),
1506 				&phy_prod_pair_addr, GFP_KERNEL);
1507 	if (!va) {
1508 		DP_ERR(dev,
1509 		       "create srq: failed to allocate dma memory for producer\n");
1510 		return -ENOMEM;
1511 	}
1512 
1513 	hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
1514 	hw_srq->virt_prod_pair_addr = va;
1515 
1516 	num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
1517 	params.num_elems = num_elems;
1518 
1519 	rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
1520 	if (rc)
1521 		goto err0;
1522 
1523 	hw_srq->num_elems = num_elems;
1524 
1525 	return 0;
1526 
1527 err0:
1528 	dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1529 			  va, phy_prod_pair_addr);
1530 	return rc;
1531 }
1532 
1533 int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
1534 		    struct ib_udata *udata)
1535 {
1536 	struct qed_rdma_destroy_srq_in_params destroy_in_params;
1537 	struct qed_rdma_create_srq_in_params in_params = {};
1538 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1539 	struct qed_rdma_create_srq_out_params out_params;
1540 	struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
1541 	struct qedr_create_srq_ureq ureq = {};
1542 	u64 pbl_base_addr, phy_prod_pair_addr;
1543 	struct qedr_srq_hwq_info *hw_srq;
1544 	u32 page_cnt, page_size;
1545 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
1546 	int rc = 0;
1547 
1548 	DP_DEBUG(dev, QEDR_MSG_QP,
1549 		 "create SRQ called from %s (pd %p)\n",
1550 		 (udata) ? "User lib" : "kernel", pd);
1551 
1552 	if (init_attr->srq_type != IB_SRQT_BASIC &&
1553 	    init_attr->srq_type != IB_SRQT_XRC)
1554 		return -EOPNOTSUPP;
1555 
1556 	rc = qedr_check_srq_params(dev, init_attr, udata);
1557 	if (rc)
1558 		return -EINVAL;
1559 
1560 	srq->dev = dev;
1561 	srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
1562 	hw_srq = &srq->hw_srq;
1563 	spin_lock_init(&srq->lock);
1564 
1565 	hw_srq->max_wr = init_attr->attr.max_wr;
1566 	hw_srq->max_sges = init_attr->attr.max_sge;
1567 
1568 	if (udata) {
1569 		if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1570 							 udata->inlen))) {
1571 			DP_ERR(dev,
1572 			       "create srq: problem copying data from user space\n");
1573 			goto err0;
1574 		}
1575 
1576 		rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
1577 		if (rc)
1578 			goto err0;
1579 
1580 		page_cnt = srq->usrq.pbl_info.num_pbes;
1581 		pbl_base_addr = srq->usrq.pbl_tbl->pa;
1582 		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1583 		page_size = PAGE_SIZE;
1584 	} else {
1585 		struct qed_chain *pbl;
1586 
1587 		rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
1588 		if (rc)
1589 			goto err0;
1590 
1591 		pbl = &hw_srq->pbl;
1592 		page_cnt = qed_chain_get_page_cnt(pbl);
1593 		pbl_base_addr = qed_chain_get_pbl_phys(pbl);
1594 		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1595 		page_size = QED_CHAIN_PAGE_SIZE;
1596 	}
1597 
1598 	in_params.pd_id = pd->pd_id;
1599 	in_params.pbl_base_addr = pbl_base_addr;
1600 	in_params.prod_pair_addr = phy_prod_pair_addr;
1601 	in_params.num_pages = page_cnt;
1602 	in_params.page_size = page_size;
1603 	if (srq->is_xrc) {
1604 		struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
1605 		struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);
1606 
1607 		in_params.is_xrc = 1;
1608 		in_params.xrcd_id = xrcd->xrcd_id;
1609 		in_params.cq_cid = cq->icid;
1610 	}
1611 
1612 	rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
1613 	if (rc)
1614 		goto err1;
1615 
1616 	srq->srq_id = out_params.srq_id;
1617 
1618 	if (udata) {
1619 		rc = qedr_copy_srq_uresp(dev, srq, udata);
1620 		if (rc)
1621 			goto err2;
1622 	}
1623 
1624 	rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
1625 	if (rc)
1626 		goto err2;
1627 
1628 	DP_DEBUG(dev, QEDR_MSG_SRQ,
1629 		 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
1630 	return 0;
1631 
1632 err2:
1633 	destroy_in_params.srq_id = srq->srq_id;
1634 
1635 	dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
1636 err1:
1637 	if (udata)
1638 		qedr_free_srq_user_params(srq);
1639 	else
1640 		qedr_free_srq_kernel_params(srq);
1641 err0:
1642 	return -EFAULT;
1643 }
1644 
1645 int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1646 {
1647 	struct qed_rdma_destroy_srq_in_params in_params = {};
1648 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1649 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
1650 
1651 	xa_erase_irq(&dev->srqs, srq->srq_id);
1652 	in_params.srq_id = srq->srq_id;
1653 	in_params.is_xrc = srq->is_xrc;
1654 	dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
1655 
1656 	if (ibsrq->uobject)
1657 		qedr_free_srq_user_params(srq);
1658 	else
1659 		qedr_free_srq_kernel_params(srq);
1660 
1661 	DP_DEBUG(dev, QEDR_MSG_SRQ,
1662 		 "destroy srq: destroyed srq with srq_id=0x%0x\n",
1663 		 srq->srq_id);
1664 	return 0;
1665 }
1666 
1667 int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1668 		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
1669 {
1670 	struct qed_rdma_modify_srq_in_params in_params = {};
1671 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1672 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
1673 	int rc;
1674 
1675 	if (attr_mask & IB_SRQ_MAX_WR) {
1676 		DP_ERR(dev,
1677 		       "modify srq: invalid attribute mask=0x%x specified for %p\n",
1678 		       attr_mask, srq);
1679 		return -EINVAL;
1680 	}
1681 
1682 	if (attr_mask & IB_SRQ_LIMIT) {
1683 		if (attr->srq_limit >= srq->hw_srq.max_wr) {
1684 			DP_ERR(dev,
1685 			       "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
1686 			       attr->srq_limit, srq->hw_srq.max_wr);
1687 			return -EINVAL;
1688 		}
1689 
1690 		in_params.srq_id = srq->srq_id;
1691 		in_params.wqe_limit = attr->srq_limit;
1692 		rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
1693 		if (rc)
1694 			return rc;
1695 	}
1696 
1697 	srq->srq_limit = attr->srq_limit;
1698 
1699 	DP_DEBUG(dev, QEDR_MSG_SRQ,
1700 		 "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
1701 
1702 	return 0;
1703 }
1704 
1705 static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type)
1706 {
1707 	switch (ib_qp_type) {
1708 	case IB_QPT_RC:
1709 		return QED_RDMA_QP_TYPE_RC;
1710 	case IB_QPT_XRC_INI:
1711 		return QED_RDMA_QP_TYPE_XRC_INI;
1712 	case IB_QPT_XRC_TGT:
1713 		return QED_RDMA_QP_TYPE_XRC_TGT;
1714 	default:
1715 		return QED_RDMA_QP_TYPE_INVAL;
1716 	}
1717 }
1718 
1719 static inline void
1720 qedr_init_common_qp_in_params(struct qedr_dev *dev,
1721 			      struct qedr_pd *pd,
1722 			      struct qedr_qp *qp,
1723 			      struct ib_qp_init_attr *attrs,
1724 			      bool fmr_and_reserved_lkey,
1725 			      struct qed_rdma_create_qp_in_params *params)
1726 {
1727 	/* QP handle to be written in an async event */
1728 	params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1729 	params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
1730 
1731 	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1732 	params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1733 	params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
1734 	params->stats_queue = 0;
1735 
1736 	if (pd) {
1737 		params->pd = pd->pd_id;
1738 		params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1739 	}
1740 
1741 	if (qedr_qp_has_sq(qp))
1742 		params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1743 
1744 	if (qedr_qp_has_rq(qp))
1745 		params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1746 
1747 	if (qedr_qp_has_srq(qp)) {
1748 		params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1749 		params->srq_id = qp->srq->srq_id;
1750 		params->use_srq = true;
1751 	} else {
1752 		params->srq_id = 0;
1753 		params->use_srq = false;
1754 	}
1755 }
1756 
1757 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1758 {
1759 	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1760 		 "qp=%p. "
1761 		 "sq_addr=0x%llx, "
1762 		 "sq_len=%zd, "
1763 		 "rq_addr=0x%llx, "
1764 		 "rq_len=%zd"
1765 		 "\n",
1766 		 qp,
1767 		 qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
1768 		 qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
1769 		 qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
1770 		 qedr_qp_has_rq(qp) ? qp->urq.buf_len : 0);
1771 }
1772 
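/* For iWARP the qed layer allocates the SQ/RQ PBL tables itself; copy back
 * the addresses it returned and populate them with the PBEs of the pinned
 * user buffers.
 */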
1773 static inline void
1774 qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1775 			    struct qedr_qp *qp,
1776 			    struct qed_rdma_create_qp_out_params *out_params)
1777 {
1778 	qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1779 	qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1780 
1781 	qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1782 			   &qp->usq.pbl_info, FW_PAGE_SHIFT);
1783 	if (!qp->srq) {
1784 		qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1785 		qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1786 	}
1787 
1788 	qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1789 			   &qp->urq.pbl_info, FW_PAGE_SHIFT);
1790 }
1791 
1792 static void qedr_cleanup_user(struct qedr_dev *dev,
1793 			      struct qedr_ucontext *ctx,
1794 			      struct qedr_qp *qp)
1795 {
1796 	if (qedr_qp_has_sq(qp)) {
1797 		ib_umem_release(qp->usq.umem);
1798 		qp->usq.umem = NULL;
1799 	}
1800 
1801 	if (qedr_qp_has_rq(qp)) {
1802 		ib_umem_release(qp->urq.umem);
1803 		qp->urq.umem = NULL;
1804 	}
1805 
1806 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
1807 		qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1808 		qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1809 	} else {
1810 		kfree(qp->usq.pbl_tbl);
1811 		kfree(qp->urq.pbl_tbl);
1812 	}
1813 
1814 	if (qp->usq.db_rec_data) {
1815 		qedr_db_recovery_del(dev, qp->usq.db_addr,
1816 				     &qp->usq.db_rec_data->db_data);
1817 		rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
1818 	}
1819 
1820 	if (qp->urq.db_rec_data) {
1821 		qedr_db_recovery_del(dev, qp->urq.db_addr,
1822 				     &qp->urq.db_rec_data->db_data);
1823 		rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
1824 	}
1825 
1826 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
1827 		qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
1828 				     &qp->urq.db_rec_db2_data);
1829 }
1830 
1831 static int qedr_create_user_qp(struct qedr_dev *dev,
1832 			       struct qedr_qp *qp,
1833 			       struct ib_pd *ibpd,
1834 			       struct ib_udata *udata,
1835 			       struct ib_qp_init_attr *attrs)
1836 {
1837 	struct qed_rdma_create_qp_in_params in_params;
1838 	struct qed_rdma_create_qp_out_params out_params;
1839 	struct qedr_create_qp_uresp uresp = {};
1840 	struct qedr_create_qp_ureq ureq = {};
1841 	int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
1842 	struct qedr_ucontext *ctx = NULL;
1843 	struct qedr_pd *pd = NULL;
1844 	int rc = 0;
1845 
1846 	qp->create_type = QEDR_QP_CREATE_USER;
1847 
1848 	if (ibpd) {
1849 		pd = get_qedr_pd(ibpd);
1850 		ctx = pd->uctx;
1851 	}
1852 
1853 	if (udata) {
1854 		rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1855 					udata->inlen));
1856 		if (rc) {
1857 			DP_ERR(dev, "Problem copying data from user space\n");
1858 			return rc;
1859 		}
1860 	}
1861 
1862 	if (qedr_qp_has_sq(qp)) {
1863 		/* SQ - read access only (0) */
1864 		rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
1865 					  ureq.sq_len, true, 0, alloc_and_init);
1866 		if (rc)
1867 			return rc;
1868 	}
1869 
1870 	if (qedr_qp_has_rq(qp)) {
1871 		/* RQ - read access only (0) */
1872 		rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
1873 					  ureq.rq_len, true, 0, alloc_and_init);
1874 		if (rc)
1875 			return rc;
1876 	}
1877 
1878 	memset(&in_params, 0, sizeof(in_params));
1879 	qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1880 	in_params.qp_handle_lo = ureq.qp_handle_lo;
1881 	in_params.qp_handle_hi = ureq.qp_handle_hi;
1882 
1883 	if (qp->qp_type == IB_QPT_XRC_TGT) {
1884 		struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);
1885 
1886 		in_params.xrcd_id = xrcd->xrcd_id;
1887 		in_params.qp_handle_lo = qp->qp_id;
1888 		in_params.use_srq = 1;
1889 	}
1890 
1891 	if (qedr_qp_has_sq(qp)) {
1892 		in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1893 		in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1894 	}
1895 
1896 	if (qedr_qp_has_rq(qp)) {
1897 		in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1898 		in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1899 	}
1900 
1901 	if (ctx)
1902 		SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode);
1903 
1904 	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1905 					      &in_params, &out_params);
1906 
1907 	if (!qp->qed_qp) {
1908 		rc = -ENOMEM;
1909 		goto err1;
1910 	}
1911 
1912 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
1913 		qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1914 
1915 	qp->qp_id = out_params.qp_id;
1916 	qp->icid = out_params.icid;
1917 
1918 	if (udata) {
1919 		rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
1920 		if (rc)
1921 			goto err;
1922 	}
1923 
1924 	/* db offset was calculated in copy_qp_uresp, now set in the user q */
1925 	if (qedr_qp_has_sq(qp)) {
1926 		qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
1927 		rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
1928 					  &qp->usq.db_rec_data->db_data,
1929 					  DB_REC_WIDTH_32B,
1930 					  DB_REC_USER);
1931 		if (rc)
1932 			goto err;
1933 	}
1934 
1935 	if (qedr_qp_has_rq(qp)) {
1936 		qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
1937 		rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
1938 					  &qp->urq.db_rec_data->db_data,
1939 					  DB_REC_WIDTH_32B,
1940 					  DB_REC_USER);
1941 		if (rc)
1942 			goto err;
1943 	}
1944 
1945 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1946 		qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
1947 
1948 		/* Calculate the db_rec_db2 data here since it is constant, so
1949 		 * there is no need to reflect it from user space
1950 		 */
1951 		qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
1952 		qp->urq.db_rec_db2_data.data.value =
1953 			cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
1954 
1955 		rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
1956 					  &qp->urq.db_rec_db2_data,
1957 					  DB_REC_WIDTH_32B,
1958 					  DB_REC_USER);
1959 		if (rc)
1960 			goto err;
1961 	}
1962 	qedr_qp_user_print(dev, qp);
1963 	return rc;
1964 err:
1965 	/* Don't let the destroy result overwrite the original error code */
1966 	if (dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp))
1967 		DP_ERR(dev, "create qp: fatal fault destroying qed qp\n");
1968 
1969 err1:
1970 	qedr_cleanup_user(dev, ctx, qp);
1971 	return rc;
1972 }
1973 
1974 static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1975 {
1976 	int rc;
1977 
1978 	qp->sq.db = dev->db_addr +
1979 	    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1980 	qp->sq.db_data.data.icid = qp->icid;
1981 
1982 	rc = qedr_db_recovery_add(dev, qp->sq.db,
1983 				  &qp->sq.db_data,
1984 				  DB_REC_WIDTH_32B,
1985 				  DB_REC_KERNEL);
1986 	if (rc)
1987 		return rc;
1988 
1989 	qp->rq.db = dev->db_addr +
1990 		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1991 	qp->rq.db_data.data.icid = qp->icid;
1992 	qp->rq.iwarp_db2 = dev->db_addr +
1993 			   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1994 	qp->rq.iwarp_db2_data.data.icid = qp->icid;
1995 	qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
1996 
1997 	rc = qedr_db_recovery_add(dev, qp->rq.db,
1998 				  &qp->rq.db_data,
1999 				  DB_REC_WIDTH_32B,
2000 				  DB_REC_KERNEL);
2001 	if (rc)
2002 		return rc;
2003 
2004 	rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
2005 				  &qp->rq.iwarp_db2_data,
2006 				  DB_REC_WIDTH_32B,
2007 				  DB_REC_KERNEL);
2008 	return rc;
2009 }
2010 
2011 static int
2012 qedr_roce_create_kernel_qp(struct qedr_dev *dev,
2013 			   struct qedr_qp *qp,
2014 			   struct qed_rdma_create_qp_in_params *in_params,
2015 			   u32 n_sq_elems, u32 n_rq_elems)
2016 {
2017 	struct qed_rdma_create_qp_out_params out_params;
2018 	struct qed_chain_init_params params = {
2019 		.mode		= QED_CHAIN_MODE_PBL,
2020 		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
2021 	};
2022 	int rc;
2023 
2024 	params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2025 	params.num_elems = n_sq_elems;
2026 	params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2027 
2028 	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
2029 	if (rc)
2030 		return rc;
2031 
2032 	in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
2033 	in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
2034 
2035 	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2036 	params.num_elems = n_rq_elems;
2037 	params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2038 
2039 	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
2040 	if (rc)
2041 		return rc;
2042 
2043 	in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
2044 	in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
2045 
2046 	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2047 					      in_params, &out_params);
2048 
2049 	if (!qp->qed_qp)
2050 		return -EINVAL;
2051 
2052 	qp->qp_id = out_params.qp_id;
2053 	qp->icid = out_params.icid;
2054 
2055 	return qedr_set_roce_db_info(dev, qp);
2056 }
2057 
2058 static int
2059 qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
2060 			    struct qedr_qp *qp,
2061 			    struct qed_rdma_create_qp_in_params *in_params,
2062 			    u32 n_sq_elems, u32 n_rq_elems)
2063 {
2064 	struct qed_rdma_create_qp_out_params out_params;
2065 	struct qed_chain_init_params params = {
2066 		.mode		= QED_CHAIN_MODE_PBL,
2067 		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
2068 	};
2069 	int rc;
2070 
2071 	in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
2072 						     QEDR_SQE_ELEMENT_SIZE,
2073 						     QED_CHAIN_PAGE_SIZE,
2074 						     QED_CHAIN_MODE_PBL);
2075 	in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
2076 						     QEDR_RQE_ELEMENT_SIZE,
2077 						     QED_CHAIN_PAGE_SIZE,
2078 						     QED_CHAIN_MODE_PBL);
2079 
2080 	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2081 					      in_params, &out_params);
2082 
2083 	if (!qp->qed_qp)
2084 		return -EINVAL;
2085 
2086 	/* Now we allocate the chain */
2087 
2088 	params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2089 	params.num_elems = n_sq_elems;
2090 	params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2091 	params.ext_pbl_virt = out_params.sq_pbl_virt;
2092 	params.ext_pbl_phys = out_params.sq_pbl_phys;
2093 
2094 	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
2095 	if (rc)
2096 		goto err;
2097 
2098 	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2099 	params.num_elems = n_rq_elems;
2100 	params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2101 	params.ext_pbl_virt = out_params.rq_pbl_virt;
2102 	params.ext_pbl_phys = out_params.rq_pbl_phys;
2103 
2104 	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
2105 	if (rc)
2106 		goto err;
2107 
2108 	qp->qp_id = out_params.qp_id;
2109 	qp->icid = out_params.icid;
2110 
2111 	return qedr_set_iwarp_db_info(dev, qp);
2112 
2113 err:
2114 	dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2115 
2116 	return rc;
2117 }
2118 
2119 static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
2120 {
2121 	dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
2122 	kfree(qp->wqe_wr_id);
2123 
2124 	dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
2125 	kfree(qp->rqe_wr_id);
2126 
2127 	/* GSI qp is not registered to db mechanism so no need to delete */
2128 	if (qp->qp_type == IB_QPT_GSI)
2129 		return;
2130 
2131 	qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
2132 
2133 	if (!qp->srq) {
2134 		qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
2135 
2136 		if (rdma_protocol_iwarp(&dev->ibdev, 1))
2137 			qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
2138 					     &qp->rq.iwarp_db2_data);
2139 	}
2140 }
2141 
2142 static int qedr_create_kernel_qp(struct qedr_dev *dev,
2143 				 struct qedr_qp *qp,
2144 				 struct ib_pd *ibpd,
2145 				 struct ib_qp_init_attr *attrs)
2146 {
2147 	struct qed_rdma_create_qp_in_params in_params;
2148 	struct qedr_pd *pd = get_qedr_pd(ibpd);
2149 	int rc = -EINVAL;
2150 	u32 n_rq_elems;
2151 	u32 n_sq_elems;
2152 	u32 n_sq_entries;
2153 
2154 	memset(&in_params, 0, sizeof(in_params));
2155 	qp->create_type = QEDR_QP_CREATE_KERNEL;
2156 
2157 	/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
2158 	 * the ring. The ring should allow at least a single WR, even if the
2159 	 * user requested none, due to allocation issues.
2160 	 * We should add an extra WR since the prod and cons indices of
2161 	 * wqe_wr_id are managed in such a way that the WQ is considered full
2162 	 * when (prod+1)%max_wr==cons. We currently don't do that because we
2163 	 * double the number of entries due to an iSER issue that pushes far more
2164 	 * WRs than indicated. If we decline its ib_post_send() then we get
2165 	 * error prints in the dmesg we'd like to avoid.
2166 	 */
2167 	qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
2168 			      dev->attr.max_sqe);
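	/*
	 * Illustrative sizing example (hypothetical values): with
	 * attrs->cap.max_send_wr = 128, dev->wq_multiplier = 2 and
	 * dev->attr.max_sqe = 0x8000, max_wr = min(128 * 2, 0x8000) = 256.
	 */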
2169 
2170 	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
2171 				GFP_KERNEL);
2172 	if (!qp->wqe_wr_id) {
2173 		DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
2174 		return -ENOMEM;
2175 	}
2176 
2177 	/* QP handle to be written in CQE */
2178 	in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
2179 	in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
2180 
2181 	/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
2182 	 * the ring. There ring should allow at least a single WR, even if the
2183 	 * user requested none, due to allocation issues.
2184 	 */
2185 	qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
2186 
2187 	/* Allocate driver internal RQ array */
2188 	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
2189 				GFP_KERNEL);
2190 	if (!qp->rqe_wr_id) {
2191 		DP_ERR(dev,
2192 		       "create qp: failed RQ shadow memory allocation\n");
2193 		kfree(qp->wqe_wr_id);
2194 		return -ENOMEM;
2195 	}
2196 
2197 	qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
2198 
2199 	n_sq_entries = attrs->cap.max_send_wr;
2200 	n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
2201 	n_sq_entries = max_t(u32, n_sq_entries, 1);
2202 	n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2203 
2204 	n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
2205 
2206 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
2207 		rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
2208 						 n_sq_elems, n_rq_elems);
2209 	else
2210 		rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
2211 						n_sq_elems, n_rq_elems);
2212 	if (rc)
2213 		qedr_cleanup_kernel(dev, qp);
2214 
2215 	return rc;
2216 }
2217 
2218 static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
2219 				  struct ib_udata *udata)
2220 {
2221 	struct qedr_ucontext *ctx =
2222 		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
2223 					  ibucontext);
2224 	int rc;
2225 
2226 	if (qp->qp_type != IB_QPT_GSI) {
2227 		rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2228 		if (rc)
2229 			return rc;
2230 	}
2231 
2232 	if (qp->create_type == QEDR_QP_CREATE_USER)
2233 		qedr_cleanup_user(dev, ctx, qp);
2234 	else
2235 		qedr_cleanup_kernel(dev, qp);
2236 
2237 	return 0;
2238 }
2239 
2240 struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
2241 			     struct ib_qp_init_attr *attrs,
2242 			     struct ib_udata *udata)
2243 {
2244 	struct qedr_xrcd *xrcd = NULL;
2245 	struct qedr_pd *pd = NULL;
2246 	struct qedr_dev *dev;
2247 	struct qedr_qp *qp;
2248 	struct ib_qp *ibqp;
2249 	int rc = 0;
2250 
2251 	if (attrs->create_flags)
2252 		return ERR_PTR(-EOPNOTSUPP);
2253 
2254 	if (attrs->qp_type == IB_QPT_XRC_TGT) {
2255 		xrcd = get_qedr_xrcd(attrs->xrcd);
2256 		dev = get_qedr_dev(xrcd->ibxrcd.device);
2257 	} else {
2258 		pd = get_qedr_pd(ibpd);
2259 		dev = get_qedr_dev(ibpd->device);
2260 	}
2261 
2262 	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
2263 		 udata ? "user library" : "kernel", pd);
2264 
2265 	rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
2266 	if (rc)
2267 		return ERR_PTR(rc);
2268 
2269 	DP_DEBUG(dev, QEDR_MSG_QP,
2270 		 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
2271 		 udata ? "user library" : "kernel", attrs->event_handler, pd,
2272 		 get_qedr_cq(attrs->send_cq),
2273 		 attrs->send_cq ? get_qedr_cq(attrs->send_cq)->icid : 0,
2274 		 get_qedr_cq(attrs->recv_cq),
2275 		 attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
2276 
2277 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2278 	if (!qp) {
2279 		DP_ERR(dev, "create qp: failed allocating memory\n");
2280 		return ERR_PTR(-ENOMEM);
2281 	}
2282 
2283 	qedr_set_common_qp_params(dev, qp, pd, attrs);
2284 
2285 	if (attrs->qp_type == IB_QPT_GSI) {
2286 		ibqp = qedr_create_gsi_qp(dev, attrs, qp);
2287 		if (IS_ERR(ibqp))
2288 			kfree(qp);
2289 		return ibqp;
2290 	}
2291 
2292 	if (udata || xrcd)
2293 		rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
2294 	else
2295 		rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
2296 
2297 	if (rc)
2298 		goto out_free_qp;
2299 
2300 	qp->ibqp.qp_num = qp->qp_id;
2301 
2302 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2303 		rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
2304 		if (rc)
2305 			goto out_free_qp_resources;
2306 	}
2307 
2308 	return &qp->ibqp;
2309 
2310 out_free_qp_resources:
2311 	qedr_free_qp_resources(dev, qp, udata);
2312 out_free_qp:
2313 	kfree(qp);
2314 
2315 	return ERR_PTR(rc);
2316 }
2317 
2318 static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
2319 {
2320 	switch (qp_state) {
2321 	case QED_ROCE_QP_STATE_RESET:
2322 		return IB_QPS_RESET;
2323 	case QED_ROCE_QP_STATE_INIT:
2324 		return IB_QPS_INIT;
2325 	case QED_ROCE_QP_STATE_RTR:
2326 		return IB_QPS_RTR;
2327 	case QED_ROCE_QP_STATE_RTS:
2328 		return IB_QPS_RTS;
2329 	case QED_ROCE_QP_STATE_SQD:
2330 		return IB_QPS_SQD;
2331 	case QED_ROCE_QP_STATE_ERR:
2332 		return IB_QPS_ERR;
2333 	case QED_ROCE_QP_STATE_SQE:
2334 		return IB_QPS_SQE;
2335 	}
2336 	return IB_QPS_ERR;
2337 }
2338 
2339 static enum qed_roce_qp_state qedr_get_state_from_ibqp(
2340 					enum ib_qp_state qp_state)
2341 {
2342 	switch (qp_state) {
2343 	case IB_QPS_RESET:
2344 		return QED_ROCE_QP_STATE_RESET;
2345 	case IB_QPS_INIT:
2346 		return QED_ROCE_QP_STATE_INIT;
2347 	case IB_QPS_RTR:
2348 		return QED_ROCE_QP_STATE_RTR;
2349 	case IB_QPS_RTS:
2350 		return QED_ROCE_QP_STATE_RTS;
2351 	case IB_QPS_SQD:
2352 		return QED_ROCE_QP_STATE_SQD;
2353 	case IB_QPS_ERR:
2354 		return QED_ROCE_QP_STATE_ERR;
2355 	default:
2356 		return QED_ROCE_QP_STATE_ERR;
2357 	}
2358 }
2359 
2360 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
2361 {
2362 	qed_chain_reset(&qph->pbl);
2363 	qph->prod = 0;
2364 	qph->cons = 0;
2365 	qph->wqe_cons = 0;
2366 	qph->db_data.data.value = cpu_to_le16(0);
2367 }
2368 
2369 static int qedr_update_qp_state(struct qedr_dev *dev,
2370 				struct qedr_qp *qp,
2371 				enum qed_roce_qp_state cur_state,
2372 				enum qed_roce_qp_state new_state)
2373 {
2374 	int status = 0;
2375 
2376 	if (new_state == cur_state)
2377 		return 0;
2378 
2379 	switch (cur_state) {
2380 	case QED_ROCE_QP_STATE_RESET:
2381 		switch (new_state) {
2382 		case QED_ROCE_QP_STATE_INIT:
2383 			qp->prev_wqe_size = 0;
2384 			qedr_reset_qp_hwq_info(&qp->sq);
2385 			qedr_reset_qp_hwq_info(&qp->rq);
2386 			break;
2387 		default:
2388 			status = -EINVAL;
2389 			break;
2390 		}
2391 		break;
2392 	case QED_ROCE_QP_STATE_INIT:
2393 		switch (new_state) {
2394 		case QED_ROCE_QP_STATE_RTR:
2395 			/* Update doorbell (in case post_recv was
2396 			 * done before move to RTR)
2397 			 */
2398 
2399 			if (rdma_protocol_roce(&dev->ibdev, 1)) {
2400 				writel(qp->rq.db_data.raw, qp->rq.db);
2401 			}
2402 			break;
2403 		case QED_ROCE_QP_STATE_ERR:
2404 			break;
2405 		default:
2406 			/* Invalid state change. */
2407 			status = -EINVAL;
2408 			break;
2409 		}
2410 		break;
2411 	case QED_ROCE_QP_STATE_RTR:
2412 		/* RTR->XXX */
2413 		switch (new_state) {
2414 		case QED_ROCE_QP_STATE_RTS:
2415 			break;
2416 		case QED_ROCE_QP_STATE_ERR:
2417 			break;
2418 		default:
2419 			/* Invalid state change. */
2420 			status = -EINVAL;
2421 			break;
2422 		}
2423 		break;
2424 	case QED_ROCE_QP_STATE_RTS:
2425 		/* RTS->XXX */
2426 		switch (new_state) {
2427 		case QED_ROCE_QP_STATE_SQD:
2428 			break;
2429 		case QED_ROCE_QP_STATE_ERR:
2430 			break;
2431 		default:
2432 			/* Invalid state change. */
2433 			status = -EINVAL;
2434 			break;
2435 		}
2436 		break;
2437 	case QED_ROCE_QP_STATE_SQD:
2438 		/* SQD->XXX */
2439 		switch (new_state) {
2440 		case QED_ROCE_QP_STATE_RTS:
2441 		case QED_ROCE_QP_STATE_ERR:
2442 			break;
2443 		default:
2444 			/* Invalid state change. */
2445 			status = -EINVAL;
2446 			break;
2447 		}
2448 		break;
2449 	case QED_ROCE_QP_STATE_ERR:
2450 		/* ERR->XXX */
2451 		switch (new_state) {
2452 		case QED_ROCE_QP_STATE_RESET:
2453 			if ((qp->rq.prod != qp->rq.cons) ||
2454 			    (qp->sq.prod != qp->sq.cons)) {
2455 				DP_NOTICE(dev,
2456 					  "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
2457 					  qp->rq.prod, qp->rq.cons, qp->sq.prod,
2458 					  qp->sq.cons);
2459 				status = -EINVAL;
2460 			}
2461 			break;
2462 		default:
2463 			status = -EINVAL;
2464 			break;
2465 		}
2466 		break;
2467 	default:
2468 		status = -EINVAL;
2469 		break;
2470 	}
2471 
2472 	return status;
2473 }
2474 
2475 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2476 		   int attr_mask, struct ib_udata *udata)
2477 {
2478 	struct qedr_qp *qp = get_qedr_qp(ibqp);
2479 	struct qed_rdma_modify_qp_in_params qp_params = { 0 };
2480 	struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
2481 	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2482 	enum ib_qp_state old_qp_state, new_qp_state;
2483 	enum qed_roce_qp_state cur_state;
2484 	int rc = 0;
2485 
2486 	DP_DEBUG(dev, QEDR_MSG_QP,
2487 		 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
2488 		 attr->qp_state);
2489 
2490 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
2491 		return -EOPNOTSUPP;
2492 
2493 	old_qp_state = qedr_get_ibqp_state(qp->state);
2494 	if (attr_mask & IB_QP_STATE)
2495 		new_qp_state = attr->qp_state;
2496 	else
2497 		new_qp_state = old_qp_state;
2498 
2499 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
2500 		if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
2501 					ibqp->qp_type, attr_mask)) {
2502 			DP_ERR(dev,
2503 			       "modify qp: invalid attribute mask=0x%x specified for\n"
2504 			       "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
2505 			       attr_mask, qp->qp_id, ibqp->qp_type,
2506 			       old_qp_state, new_qp_state);
2507 			rc = -EINVAL;
2508 			goto err;
2509 		}
2510 	}
2511 
2512 	/* Translate the masks... */
2513 	if (attr_mask & IB_QP_STATE) {
2514 		SET_FIELD(qp_params.modify_flags,
2515 			  QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
2516 		qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
2517 	}
2518 
2519 	if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
2520 		qp_params.sqd_async = true;
2521 
2522 	if (attr_mask & IB_QP_PKEY_INDEX) {
2523 		SET_FIELD(qp_params.modify_flags,
2524 			  QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
2525 		if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
2526 			rc = -EINVAL;
2527 			goto err;
2528 		}
2529 
2530 		qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2531 	}
2532 
2533 	if (attr_mask & IB_QP_QKEY)
2534 		qp->qkey = attr->qkey;
2535 
2536 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
2537 		SET_FIELD(qp_params.modify_flags,
2538 			  QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2539 		qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2540 						  IB_ACCESS_REMOTE_READ;
2541 		qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2542 						   IB_ACCESS_REMOTE_WRITE;
2543 		qp_params.incoming_atomic_en = attr->qp_access_flags &
2544 					       IB_ACCESS_REMOTE_ATOMIC;
2545 	}
2546 
2547 	if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
2548 		if (rdma_protocol_iwarp(&dev->ibdev, 1))
2549 			return -EINVAL;
2550 
2551 		if (attr_mask & IB_QP_PATH_MTU) {
2552 			if (attr->path_mtu < IB_MTU_256 ||
2553 			    attr->path_mtu > IB_MTU_4096) {
2554 				pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2555 				rc = -EINVAL;
2556 				goto err;
2557 			}
2558 			qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2559 				      ib_mtu_enum_to_int(iboe_get_mtu
2560 							 (dev->ndev->mtu)));
2561 		}
2562 
2563 		if (!qp->mtu) {
2564 			qp->mtu =
2565 			ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2566 			pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2567 		}
2568 
2569 		SET_FIELD(qp_params.modify_flags,
2570 			  QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2571 
2572 		qp_params.traffic_class_tos = grh->traffic_class;
2573 		qp_params.flow_label = grh->flow_label;
2574 		qp_params.hop_limit_ttl = grh->hop_limit;
2575 
2576 		qp->sgid_idx = grh->sgid_index;
2577 
2578 		rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2579 		if (rc) {
2580 			DP_ERR(dev,
2581 			       "modify qp: problems with GID index %d (rc=%d)\n",
2582 			       grh->sgid_index, rc);
2583 			return rc;
2584 		}
2585 
2586 		rc = qedr_get_dmac(dev, &attr->ah_attr,
2587 				   qp_params.remote_mac_addr);
2588 		if (rc)
2589 			return rc;
2590 
2591 		qp_params.use_local_mac = true;
2592 		ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2593 
2594 		DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2595 			 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2596 			 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2597 		DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2598 			 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2599 			 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2600 		DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2601 			 qp_params.remote_mac_addr);
2602 
2603 		qp_params.mtu = qp->mtu;
2604 		qp_params.lb_indication = false;
2605 	}
2606 
2607 	if (!qp_params.mtu) {
2608 		/* Stay with current MTU */
2609 		if (qp->mtu)
2610 			qp_params.mtu = qp->mtu;
2611 		else
2612 			qp_params.mtu =
2613 			    ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2614 	}
2615 
2616 	if (attr_mask & IB_QP_TIMEOUT) {
2617 		SET_FIELD(qp_params.modify_flags,
2618 			  QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2619 
2620 		/* The received timeout value is an exponent used like this:
2621 		 *    "12.7.34 LOCAL ACK TIMEOUT
2622 		 *    Value representing the transport (ACK) timeout for use by
2623 		 *    the remote, expressed as: 4.096 * 2^timeout [usec]"
2624 		 * The FW expects timeout in msec so we need to divide the usec
2625 		 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2626 		 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2627 		 * The value of zero means infinite so we use a 'max_t' to make
2628 		 * sure that sub 1 msec values will be configured as 1 msec.
2629 		 */
2630 		if (attr->timeout)
2631 			qp_params.ack_timeout =
2632 					1 << max_t(int, attr->timeout - 8, 0);
2633 		else
2634 			qp_params.ack_timeout = 0;
2635 	}
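	/*
	 * Worked example of the approximation above: attr->timeout = 14
	 * means 4.096 usec * 2^14 ~= 67 msec, and the driver configures
	 * 2^(14 - 8) = 64 msec, i.e. the same order of magnitude.
	 */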
2636 
2637 	if (attr_mask & IB_QP_RETRY_CNT) {
2638 		SET_FIELD(qp_params.modify_flags,
2639 			  QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2640 		qp_params.retry_cnt = attr->retry_cnt;
2641 	}
2642 
2643 	if (attr_mask & IB_QP_RNR_RETRY) {
2644 		SET_FIELD(qp_params.modify_flags,
2645 			  QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2646 		qp_params.rnr_retry_cnt = attr->rnr_retry;
2647 	}
2648 
2649 	if (attr_mask & IB_QP_RQ_PSN) {
2650 		SET_FIELD(qp_params.modify_flags,
2651 			  QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2652 		qp_params.rq_psn = attr->rq_psn;
2653 		qp->rq_psn = attr->rq_psn;
2654 	}
2655 
2656 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2657 		if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2658 			rc = -EINVAL;
2659 			DP_ERR(dev,
2660 			       "unsupported max_rd_atomic=%d, supported=%d\n",
2661 			       attr->max_rd_atomic,
2662 			       dev->attr.max_qp_req_rd_atomic_resc);
2663 			goto err;
2664 		}
2665 
2666 		SET_FIELD(qp_params.modify_flags,
2667 			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2668 		qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2669 	}
2670 
2671 	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2672 		SET_FIELD(qp_params.modify_flags,
2673 			  QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2674 		qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2675 	}
2676 
2677 	if (attr_mask & IB_QP_SQ_PSN) {
2678 		SET_FIELD(qp_params.modify_flags,
2679 			  QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2680 		qp_params.sq_psn = attr->sq_psn;
2681 		qp->sq_psn = attr->sq_psn;
2682 	}
2683 
2684 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2685 		if (attr->max_dest_rd_atomic >
2686 		    dev->attr.max_qp_resp_rd_atomic_resc) {
2687 			DP_ERR(dev,
2688 			       "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2689 			       attr->max_dest_rd_atomic,
2690 			       dev->attr.max_qp_resp_rd_atomic_resc);
2691 
2692 			rc = -EINVAL;
2693 			goto err;
2694 		}
2695 
2696 		SET_FIELD(qp_params.modify_flags,
2697 			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2698 		qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2699 	}
2700 
2701 	if (attr_mask & IB_QP_DEST_QPN) {
2702 		SET_FIELD(qp_params.modify_flags,
2703 			  QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2704 
2705 		qp_params.dest_qp = attr->dest_qp_num;
2706 		qp->dest_qp_num = attr->dest_qp_num;
2707 	}
2708 
2709 	cur_state = qp->state;
2710 
2711 	/* Update the QP state before the actual ramrod to prevent a race with
2712 	 * fast path. Modifying the QP state to error will cause the device to
2713 	 * flush the CQEs; polling those flushed CQEs would be considered a
2714 	 * potential issue if the QP weren't already in the error state.
2715 	 */
2716 	if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2717 	    !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2718 		qp->state = QED_ROCE_QP_STATE_ERR;
2719 
2720 	if (qp->qp_type != IB_QPT_GSI)
2721 		rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2722 					      qp->qed_qp, &qp_params);
2723 
2724 	if (attr_mask & IB_QP_STATE) {
2725 		if ((qp->qp_type != IB_QPT_GSI) && (!udata))
2726 			rc = qedr_update_qp_state(dev, qp, cur_state,
2727 						  qp_params.new_state);
2728 		qp->state = qp_params.new_state;
2729 	}
2730 
2731 err:
2732 	return rc;
2733 }
2734 
2735 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2736 {
2737 	int ib_qp_acc_flags = 0;
2738 
2739 	if (params->incoming_rdma_write_en)
2740 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2741 	if (params->incoming_rdma_read_en)
2742 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2743 	if (params->incoming_atomic_en)
2744 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2745 	ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2746 	return ib_qp_acc_flags;
2747 }
2748 
2749 int qedr_query_qp(struct ib_qp *ibqp,
2750 		  struct ib_qp_attr *qp_attr,
2751 		  int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2752 {
2753 	struct qed_rdma_query_qp_out_params params;
2754 	struct qedr_qp *qp = get_qedr_qp(ibqp);
2755 	struct qedr_dev *dev = qp->dev;
2756 	int rc = 0;
2757 
2758 	memset(&params, 0, sizeof(params));
2759 
2760 	rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2761 	if (rc)
2762 		goto err;
2763 
2764 	memset(qp_attr, 0, sizeof(*qp_attr));
2765 	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2766 
2767 	qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2768 	qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2769 	qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2770 	qp_attr->path_mig_state = IB_MIG_MIGRATED;
2771 	qp_attr->rq_psn = params.rq_psn;
2772 	qp_attr->sq_psn = params.sq_psn;
2773 	qp_attr->dest_qp_num = params.dest_qp;
2774 
2775 	qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2776 
2777 	qp_attr->cap.max_send_wr = qp->sq.max_wr;
2778 	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2779 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
2780 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2781 	qp_attr->cap.max_inline_data = dev->attr.max_inline;
2782 	qp_init_attr->cap = qp_attr->cap;
2783 
2784 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2785 	rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2786 			params.flow_label, qp->sgid_idx,
2787 			params.hop_limit_ttl, params.traffic_class_tos);
2788 	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2789 	rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2790 	rdma_ah_set_sl(&qp_attr->ah_attr, 0);
2791 	qp_attr->timeout = params.timeout;
2792 	qp_attr->rnr_retry = params.rnr_retry;
2793 	qp_attr->retry_cnt = params.retry_cnt;
2794 	qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2795 	qp_attr->pkey_index = params.pkey_index;
2796 	qp_attr->port_num = 1;
2797 	rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2798 	rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
2799 	qp_attr->alt_pkey_index = 0;
2800 	qp_attr->alt_port_num = 0;
2801 	qp_attr->alt_timeout = 0;
2802 	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2803 
2804 	qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2805 	qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2806 	qp_attr->max_rd_atomic = params.max_rd_atomic;
2807 	qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2808 
2809 	DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2810 		 qp_attr->cap.max_inline_data);
2811 
2812 err:
2813 	return rc;
2814 }
2815 
2816 int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
2817 {
2818 	struct qedr_qp *qp = get_qedr_qp(ibqp);
2819 	struct qedr_dev *dev = qp->dev;
2820 	struct ib_qp_attr attr;
2821 	int attr_mask = 0;
2822 
2823 	DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2824 		 qp, qp->qp_type);
2825 
2826 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
2827 		if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2828 		    (qp->state != QED_ROCE_QP_STATE_ERR) &&
2829 		    (qp->state != QED_ROCE_QP_STATE_INIT)) {
2830 
2831 			attr.qp_state = IB_QPS_ERR;
2832 			attr_mask |= IB_QP_STATE;
2833 
2834 			/* Change the QP state to ERROR */
2835 			qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2836 		}
2837 	} else {
2838 		/* If connection establishment started the WAIT_FOR_CONNECT
2839 		 * bit will be on, and we need to wait for the establishment
2840 		 * to complete before destroying the qp.
2841 		 */
2842 		if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
2843 				     &qp->iwarp_cm_flags))
2844 			wait_for_completion(&qp->iwarp_cm_comp);
2845 
2846 		/* If graceful disconnect started, the WAIT_FOR_DISCONNECT
2847 		 * bit will be on, and we need to wait for the disconnect to
2848 		 * complete before continuing. We can use the same completion,
2849 		 * iwarp_cm_comp, since this is the only place that waits for
2850 		 * this completion and it is sequential. In addition,
2851 		 * disconnect can't occur before the connection is fully
2852 		 * established, therefore if WAIT_FOR_DISCONNECT is on it
2853 		 * means WAIT_FOR_CONNECT is also on and the completion for
2854 		 * CONNECT already occurred.
2855 		 */
2856 		if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
2857 				     &qp->iwarp_cm_flags))
2858 			wait_for_completion(&qp->iwarp_cm_comp);
2859 	}
2860 
2861 	if (qp->qp_type == IB_QPT_GSI)
2862 		qedr_destroy_gsi_qp(dev);
2863 
2864 	/* We need to remove the entry from the xarray before we release the
2865 	 * qp_id to avoid a race of the qp_id being reallocated and failing
2866 	 * on xa_insert
2867 	 */
2868 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
2869 		xa_erase(&dev->qps, qp->qp_id);
2870 
2871 	qedr_free_qp_resources(dev, qp, udata);
2872 
2873 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
2874 		qedr_iw_qp_rem_ref(&qp->ibqp);
2875 	else
2876 		kfree(qp);
2877 
2878 	return 0;
2879 }
2880 
2881 int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
2882 		   struct ib_udata *udata)
2883 {
2884 	struct qedr_ah *ah = get_qedr_ah(ibah);
2885 
2886 	rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
2887 
2888 	return 0;
2889 }
2890 
2891 int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
2892 {
2893 	struct qedr_ah *ah = get_qedr_ah(ibah);
2894 
2895 	rdma_destroy_ah_attr(&ah->attr);
2896 	return 0;
2897 }
2898 
2899 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2900 {
2901 	struct qedr_pbl *pbl, *tmp;
2902 
2903 	if (info->pbl_table)
2904 		list_add_tail(&info->pbl_table->list_entry,
2905 			      &info->free_pbl_list);
2906 
2907 	if (!list_empty(&info->inuse_pbl_list))
2908 		list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2909 
2910 	list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2911 		list_del(&pbl->list_entry);
2912 		qedr_free_pbl(dev, &info->pbl_info, pbl);
2913 	}
2914 }
2915 
2916 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2917 			size_t page_list_len, bool two_layered)
2918 {
2919 	struct qedr_pbl *tmp;
2920 	int rc;
2921 
2922 	INIT_LIST_HEAD(&info->free_pbl_list);
2923 	INIT_LIST_HEAD(&info->inuse_pbl_list);
2924 
2925 	rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2926 				  page_list_len, two_layered);
2927 	if (rc)
2928 		goto done;
2929 
2930 	info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2931 	if (IS_ERR(info->pbl_table)) {
2932 		rc = PTR_ERR(info->pbl_table);
2933 		goto done;
2934 	}
2935 
2936 	DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2937 		 &info->pbl_table->pa);
2938 
2939 	/* In the usual case we use 2 PBLs, so we add one to the free
2940 	 * list and allocate another one
2941 	 */
2942 	tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2943 	if (IS_ERR(tmp)) {
2944 		DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2945 		goto done;
2946 	}
2947 
2948 	list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2949 
2950 	DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2951 
2952 done:
2953 	if (rc)
2954 		free_mr_info(dev, info);
2955 
2956 	return rc;
2957 }
2958 
2959 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2960 			       u64 usr_addr, int acc, struct ib_udata *udata)
2961 {
2962 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2963 	struct qedr_mr *mr;
2964 	struct qedr_pd *pd;
2965 	int rc = -ENOMEM;
2966 
2967 	pd = get_qedr_pd(ibpd);
2968 	DP_DEBUG(dev, QEDR_MSG_MR,
2969 		 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2970 		 pd->pd_id, start, len, usr_addr, acc);
2971 
2972 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2973 		return ERR_PTR(-EINVAL);
2974 
2975 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2976 	if (!mr)
2977 		return ERR_PTR(rc);
2978 
2979 	mr->type = QEDR_MR_USER;
2980 
2981 	mr->umem = ib_umem_get(ibpd->device, start, len, acc);
2982 	if (IS_ERR(mr->umem)) {
2983 		rc = -EFAULT;
2984 		goto err0;
2985 	}
2986 
2987 	rc = init_mr_info(dev, &mr->info,
2988 			  ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
2989 	if (rc)
2990 		goto err1;
2991 
2992 	qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2993 			   &mr->info.pbl_info, PAGE_SHIFT);
2994 
2995 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2996 	if (rc) {
2997 		DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2998 		goto err1;
2999 	}
3000 
3001 	/* Index only, 18 bit long, lkey = itid << 8 | key */
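	/* e.g. (hypothetical values) itid = 0x1a2b3, key = 0 -> lkey = 0x1a2b300 */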
3002 	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3003 	mr->hw_mr.key = 0;
3004 	mr->hw_mr.pd = pd->pd_id;
3005 	mr->hw_mr.local_read = 1;
3006 	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3007 	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3008 	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3009 	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3010 	mr->hw_mr.mw_bind = false;
3011 	mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
3012 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3013 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3014 	mr->hw_mr.page_size_log = PAGE_SHIFT;
3015 	mr->hw_mr.length = len;
3016 	mr->hw_mr.vaddr = usr_addr;
3017 	mr->hw_mr.phy_mr = false;
3018 	mr->hw_mr.dma_mr = false;
3019 
3020 	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3021 	if (rc) {
3022 		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3023 		goto err2;
3024 	}
3025 
3026 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3027 	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3028 	    mr->hw_mr.remote_atomic)
3029 		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3030 
3031 	DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
3032 		 mr->ibmr.lkey);
3033 	return &mr->ibmr;
3034 
3035 err2:
3036 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3037 err1:
3038 	qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3039 err0:
3040 	kfree(mr);
3041 	return ERR_PTR(rc);
3042 }
3043 
3044 int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3045 {
3046 	struct qedr_mr *mr = get_qedr_mr(ib_mr);
3047 	struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
3048 	int rc = 0;
3049 
3050 	rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
3051 	if (rc)
3052 		return rc;
3053 
3054 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3055 
3056 	if (mr->type != QEDR_MR_DMA)
3057 		free_mr_info(dev, &mr->info);
3058 
3059 	/* mr->umem is only set for user-registered memory */
3060 	ib_umem_release(mr->umem);
3061 
3062 	kfree(mr);
3063 
3064 	return rc;
3065 }
3066 
3067 static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
3068 				       int max_page_list_len)
3069 {
3070 	struct qedr_pd *pd = get_qedr_pd(ibpd);
3071 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3072 	struct qedr_mr *mr;
3073 	int rc = -ENOMEM;
3074 
3075 	DP_DEBUG(dev, QEDR_MSG_MR,
3076 		 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
3077 		 max_page_list_len);
3078 
3079 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3080 	if (!mr)
3081 		return ERR_PTR(rc);
3082 
3083 	mr->dev = dev;
3084 	mr->type = QEDR_MR_FRMR;
3085 
3086 	rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
3087 	if (rc)
3088 		goto err0;
3089 
3090 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3091 	if (rc) {
3092 		DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
3093 		goto err0;
3094 	}
3095 
3096 	/* Index only, 18 bit long, lkey = itid << 8 | key */
3097 	mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
3098 	mr->hw_mr.key = 0;
3099 	mr->hw_mr.pd = pd->pd_id;
3100 	mr->hw_mr.local_read = 1;
3101 	mr->hw_mr.local_write = 0;
3102 	mr->hw_mr.remote_read = 0;
3103 	mr->hw_mr.remote_write = 0;
3104 	mr->hw_mr.remote_atomic = 0;
3105 	mr->hw_mr.mw_bind = false;
3106 	mr->hw_mr.pbl_ptr = 0;
3107 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3108 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3109 	mr->hw_mr.length = 0;
3110 	mr->hw_mr.vaddr = 0;
3111 	mr->hw_mr.phy_mr = true;
3112 	mr->hw_mr.dma_mr = false;
3113 
3114 	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3115 	if (rc) {
3116 		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3117 		goto err1;
3118 	}
3119 
3120 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3121 	mr->ibmr.rkey = mr->ibmr.lkey;
3122 
3123 	DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
3124 	return mr;
3125 
3126 err1:
3127 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3128 err0:
3129 	kfree(mr);
3130 	return ERR_PTR(rc);
3131 }
3132 
3133 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
3134 			    u32 max_num_sg)
3135 {
3136 	struct qedr_mr *mr;
3137 
3138 	if (mr_type != IB_MR_TYPE_MEM_REG)
3139 		return ERR_PTR(-EINVAL);
3140 
3141 	mr = __qedr_alloc_mr(ibpd, max_num_sg);
3142 
3143 	if (IS_ERR(mr))
3144 		return ERR_PTR(-EINVAL);
3145 
3146 	return &mr->ibmr;
3147 }
3148 
3149 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
3150 {
3151 	struct qedr_mr *mr = get_qedr_mr(ibmr);
3152 	struct qedr_pbl *pbl_table;
3153 	struct regpair *pbe;
3154 	u32 pbes_in_page;
3155 
3156 	if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
3157 		DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
3158 		return -ENOMEM;
3159 	}
3160 
3161 	DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
3162 		 mr->npages, addr);
3163 
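	/*
	 * Two-level PBE indexing example (hypothetical sizes): with
	 * pbl_size = 4096, pbes_in_page = 4096 / 8 = 512, so for
	 * npages = 700 the PBE lands in pbl_table[1] at offset 188.
	 */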
3164 	pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
3165 	pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
3166 	pbe = (struct regpair *)pbl_table->va;
3167 	pbe +=  mr->npages % pbes_in_page;
3168 	pbe->lo = cpu_to_le32((u32)addr);
3169 	pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
3170 
3171 	mr->npages++;
3172 
3173 	return 0;
3174 }
3175 
3176 static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
3177 {
3178 	int work = info->completed - info->completed_handled - 1;
3179 
3180 	DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
3181 	while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
3182 		struct qedr_pbl *pbl;
3183 
3184 		/* Free all the page lists that can be freed
3185 		 * (all the ones that were invalidated), under the assumption
3186 		 * that if an FMR completed successfully, any invalidate
3187 		 * operation posted before it has also completed.
3188 		 */
3189 		pbl = list_first_entry(&info->inuse_pbl_list,
3190 				       struct qedr_pbl, list_entry);
3191 		list_move_tail(&pbl->list_entry, &info->free_pbl_list);
3192 		info->completed_handled++;
3193 	}
3194 }
3195 
3196 int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
3197 		   int sg_nents, unsigned int *sg_offset)
3198 {
3199 	struct qedr_mr *mr = get_qedr_mr(ibmr);
3200 
3201 	mr->npages = 0;
3202 
3203 	handle_completed_mrs(mr->dev, &mr->info);
3204 	return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
3205 }
3206 
3207 struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
3208 {
3209 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3210 	struct qedr_pd *pd = get_qedr_pd(ibpd);
3211 	struct qedr_mr *mr;
3212 	int rc;
3213 
3214 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3215 	if (!mr)
3216 		return ERR_PTR(-ENOMEM);
3217 
3218 	mr->type = QEDR_MR_DMA;
3219 
3220 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3221 	if (rc) {
3222 		DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
3223 		goto err1;
3224 	}
3225 
3226 	/* index only, 18 bit long, lkey = itid << 8 | key */
3227 	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3228 	mr->hw_mr.pd = pd->pd_id;
3229 	mr->hw_mr.local_read = 1;
3230 	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3231 	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3232 	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3233 	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3234 	mr->hw_mr.dma_mr = true;
3235 
3236 	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3237 	if (rc) {
3238 		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3239 		goto err2;
3240 	}
3241 
3242 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3243 	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3244 	    mr->hw_mr.remote_atomic)
3245 		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3246 
3247 	DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
3248 	return &mr->ibmr;
3249 
3250 err2:
3251 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3252 err1:
3253 	kfree(mr);
3254 	return ERR_PTR(rc);
3255 }
3256 
3257 static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
3258 {
3259 	return (((wq->prod + 1) % wq->max_wr) == wq->cons);
3260 }
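/*
 * Example of the full check above: with max_wr = 4, prod = 3 and cons = 0
 * the ring reports full ((3 + 1) % 4 == 0), i.e. one slot is always left
 * unused to distinguish a full ring from an empty one.
 */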
3261 
3262 static int sge_data_len(struct ib_sge *sg_list, int num_sge)
3263 {
3264 	int i, len = 0;
3265 
3266 	for (i = 0; i < num_sge; i++)
3267 		len += sg_list[i].length;
3268 
3269 	return len;
3270 }
3271 
3272 static void swap_wqe_data64(u64 *p)
3273 {
3274 	int i;
3275 
3276 	for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
3277 		*p = cpu_to_be64(cpu_to_le64(*p));
3278 }
3279 
3280 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
3281 				       struct qedr_qp *qp, u8 *wqe_size,
3282 				       const struct ib_send_wr *wr,
3283 				       const struct ib_send_wr **bad_wr,
3284 				       u8 *bits, u8 bit)
3285 {
3286 	u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
3287 	char *seg_prt, *wqe;
3288 	int i, seg_siz;
3289 
3290 	if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
3291 		DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
3292 		*bad_wr = wr;
3293 		return 0;
3294 	}
3295 
3296 	if (!data_size)
3297 		return data_size;
3298 
3299 	*bits |= bit;
3300 
3301 	seg_prt = NULL;
3302 	wqe = NULL;
3303 	seg_siz = 0;
3304 
3305 	/* Copy data inline */
3306 	for (i = 0; i < wr->num_sge; i++) {
3307 		u32 len = wr->sg_list[i].length;
3308 		void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
3309 
3310 		while (len > 0) {
3311 			u32 cur;
3312 
3313 			/* New segment required */
3314 			if (!seg_siz) {
3315 				wqe = (char *)qed_chain_produce(&qp->sq.pbl);
3316 				seg_prt = wqe;
3317 				seg_siz = sizeof(struct rdma_sq_common_wqe);
3318 				(*wqe_size)++;
3319 			}
3320 
3321 			/* Calculate currently allowed length */
3322 			cur = min_t(u32, len, seg_siz);
3323 			memcpy(seg_prt, src, cur);
3324 
3325 			/* Update segment variables */
3326 			seg_prt += cur;
3327 			seg_siz -= cur;
3328 
3329 			/* Update sge variables */
3330 			src += cur;
3331 			len -= cur;
3332 
3333 			/* Swap fully-completed segments */
3334 			if (!seg_siz)
3335 				swap_wqe_data64((u64 *)wqe);
3336 		}
3337 	}
3338 
3339 	/* Swap the last, partially filled segment */
3340 	if (seg_siz)
3341 		swap_wqe_data64((u64 *)wqe);
3342 
3343 	return data_size;
3344 }
3345 
3346 #define RQ_SGE_SET(sge, vaddr, vlength, vflags)			\
3347 	do {							\
3348 		DMA_REGPAIR_LE(sge->addr, vaddr);		\
3349 		(sge)->length = cpu_to_le32(vlength);		\
3350 		(sge)->flags = cpu_to_le32(vflags);		\
3351 	} while (0)
3352 
3353 #define SRQ_HDR_SET(hdr, vwr_id, num_sge)			\
3354 	do {							\
3355 		DMA_REGPAIR_LE(hdr->wr_id, vwr_id);		\
3356 		(hdr)->num_sges = num_sge;			\
3357 	} while (0)
3358 
3359 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)			\
3360 	do {							\
3361 		DMA_REGPAIR_LE(sge->addr, vaddr);		\
3362 		(sge)->length = cpu_to_le32(vlength);		\
3363 		(sge)->l_key = cpu_to_le32(vlkey);		\
3364 	} while (0)
3365 
3366 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
3367 				const struct ib_send_wr *wr)
3368 {
3369 	u32 data_size = 0;
3370 	int i;
3371 
3372 	for (i = 0; i < wr->num_sge; i++) {
3373 		struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
3374 
3375 		DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
3376 		sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
3377 		sge->length = cpu_to_le32(wr->sg_list[i].length);
3378 		data_size += wr->sg_list[i].length;
3379 	}
3380 
3381 	if (wqe_size)
3382 		*wqe_size += wr->num_sge;
3383 
3384 	return data_size;
3385 }
3386 
3387 static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
3388 				     struct qedr_qp *qp,
3389 				     struct rdma_sq_rdma_wqe_1st *rwqe,
3390 				     struct rdma_sq_rdma_wqe_2nd *rwqe2,
3391 				     const struct ib_send_wr *wr,
3392 				     const struct ib_send_wr **bad_wr)
3393 {
3394 	rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
3395 	DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
3396 
3397 	if (wr->send_flags & IB_SEND_INLINE &&
3398 	    (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
3399 	     wr->opcode == IB_WR_RDMA_WRITE)) {
3400 		u8 flags = 0;
3401 
3402 		SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
3403 		return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
3404 						   bad_wr, &rwqe->flags, flags);
3405 	}
3406 
3407 	return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
3408 }
3409 
3410 static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
3411 				     struct qedr_qp *qp,
3412 				     struct rdma_sq_send_wqe_1st *swqe,
3413 				     struct rdma_sq_send_wqe_2st *swqe2,
3414 				     const struct ib_send_wr *wr,
3415 				     const struct ib_send_wr **bad_wr)
3416 {
3417 	memset(swqe2, 0, sizeof(*swqe2));
3418 	if (wr->send_flags & IB_SEND_INLINE) {
3419 		u8 flags = 0;
3420 
3421 		SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
3422 		return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
3423 						   bad_wr, &swqe->flags, flags);
3424 	}
3425 
3426 	return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
3427 }
3428 
3429 static int qedr_prepare_reg(struct qedr_qp *qp,
3430 			    struct rdma_sq_fmr_wqe_1st *fwqe1,
3431 			    const struct ib_reg_wr *wr)
3432 {
3433 	struct qedr_mr *mr = get_qedr_mr(wr->mr);
3434 	struct rdma_sq_fmr_wqe_2nd *fwqe2;
3435 
3436 	fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
3437 	fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
3438 	fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
3439 	fwqe1->l_key = wr->key;
3440 
3441 	fwqe2->access_ctrl = 0;
3442 
3443 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
3444 		   !!(wr->access & IB_ACCESS_REMOTE_READ));
3445 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
3446 		   !!(wr->access & IB_ACCESS_REMOTE_WRITE));
3447 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
3448 		   !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
3449 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
3450 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
3451 		   !!(wr->access & IB_ACCESS_LOCAL_WRITE));
3452 	fwqe2->fmr_ctrl = 0;
3453 
3454 	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
3455 		   ilog2(mr->ibmr.page_size) - 12);
3456 
3457 	fwqe2->length_hi = 0;
3458 	fwqe2->length_lo = mr->ibmr.length;
3459 	fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
3460 	fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
3461 
3462 	qp->wqe_wr_id[qp->sq.prod].mr = mr;
3463 
3464 	return 0;
3465 }
3466 
3467 static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
3468 {
3469 	switch (opcode) {
3470 	case IB_WR_RDMA_WRITE:
3471 	case IB_WR_RDMA_WRITE_WITH_IMM:
3472 		return IB_WC_RDMA_WRITE;
3473 	case IB_WR_SEND_WITH_IMM:
3474 	case IB_WR_SEND:
3475 	case IB_WR_SEND_WITH_INV:
3476 		return IB_WC_SEND;
3477 	case IB_WR_RDMA_READ:
3478 	case IB_WR_RDMA_READ_WITH_INV:
3479 		return IB_WC_RDMA_READ;
3480 	case IB_WR_ATOMIC_CMP_AND_SWP:
3481 		return IB_WC_COMP_SWAP;
3482 	case IB_WR_ATOMIC_FETCH_AND_ADD:
3483 		return IB_WC_FETCH_ADD;
3484 	case IB_WR_REG_MR:
3485 		return IB_WC_REG_MR;
3486 	case IB_WR_LOCAL_INV:
3487 		return IB_WC_LOCAL_INV;
3488 	default:
3489 		return IB_WC_SEND;
3490 	}
3491 }
3492 
3493 static inline bool qedr_can_post_send(struct qedr_qp *qp,
3494 				      const struct ib_send_wr *wr)
3495 {
3496 	int wq_is_full, err_wr, pbl_is_full;
3497 	struct qedr_dev *dev = qp->dev;
3498 
3499 	/* prevent SQ overflow and/or processing of a bad WR */
3500 	err_wr = wr->num_sge > qp->sq.max_sges;
3501 	wq_is_full = qedr_wq_is_full(&qp->sq);
3502 	pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
3503 		      QEDR_MAX_SQE_ELEMENTS_PER_SQE;
3504 	if (wq_is_full || err_wr || pbl_is_full) {
3505 		if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
3506 			DP_ERR(dev,
3507 			       "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
3508 			       qp);
3509 			qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
3510 		}
3511 
3512 		if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
3513 			DP_ERR(dev,
3514 			       "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
3515 			       qp);
3516 			qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
3517 		}
3518 
3519 		if (pbl_is_full &&
3520 		    !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
3521 			DP_ERR(dev,
3522 			       "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
3523 			       qp);
3524 			qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
3525 		}
3526 		return false;
3527 	}
3528 	return true;
3529 }
3530 
3531 static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3532 			    const struct ib_send_wr **bad_wr)
3533 {
3534 	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3535 	struct qedr_qp *qp = get_qedr_qp(ibqp);
3536 	struct rdma_sq_atomic_wqe_1st *awqe1;
3537 	struct rdma_sq_atomic_wqe_2nd *awqe2;
3538 	struct rdma_sq_atomic_wqe_3rd *awqe3;
3539 	struct rdma_sq_send_wqe_2st *swqe2;
3540 	struct rdma_sq_local_inv_wqe *iwqe;
3541 	struct rdma_sq_rdma_wqe_2nd *rwqe2;
3542 	struct rdma_sq_send_wqe_1st *swqe;
3543 	struct rdma_sq_rdma_wqe_1st *rwqe;
3544 	struct rdma_sq_fmr_wqe_1st *fwqe1;
3545 	struct rdma_sq_common_wqe *wqe;
3546 	u32 length;
3547 	int rc = 0;
3548 	bool comp;
3549 
3550 	if (!qedr_can_post_send(qp, wr)) {
3551 		*bad_wr = wr;
3552 		return -ENOMEM;
3553 	}
3554 
3555 	wqe = qed_chain_produce(&qp->sq.pbl);
3556 	qp->wqe_wr_id[qp->sq.prod].signaled =
3557 		!!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3558 
3559 	wqe->flags = 0;
3560 	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3561 		   !!(wr->send_flags & IB_SEND_SOLICITED));
3562 	comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3563 	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3564 	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3565 		   !!(wr->send_flags & IB_SEND_FENCE));
3566 	wqe->prev_wqe_size = qp->prev_wqe_size;
3567 
3568 	qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3569 
3570 	switch (wr->opcode) {
3571 	case IB_WR_SEND_WITH_IMM:
3572 		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3573 			rc = -EINVAL;
3574 			*bad_wr = wr;
3575 			break;
3576 		}
3577 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3578 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3579 		swqe->wqe_size = 2;
3580 		swqe2 = qed_chain_produce(&qp->sq.pbl);
3581 
3582 		swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3583 		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3584 						   wr, bad_wr);
3585 		swqe->length = cpu_to_le32(length);
3586 		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3587 		qp->prev_wqe_size = swqe->wqe_size;
3588 		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3589 		break;
3590 	case IB_WR_SEND:
3591 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3592 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3593 
3594 		swqe->wqe_size = 2;
3595 		swqe2 = qed_chain_produce(&qp->sq.pbl);
3596 		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3597 						   wr, bad_wr);
3598 		swqe->length = cpu_to_le32(length);
3599 		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3600 		qp->prev_wqe_size = swqe->wqe_size;
3601 		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3602 		break;
3603 	case IB_WR_SEND_WITH_INV:
3604 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3605 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3606 		swqe2 = qed_chain_produce(&qp->sq.pbl);
3607 		swqe->wqe_size = 2;
3608 		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3609 		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3610 						   wr, bad_wr);
3611 		swqe->length = cpu_to_le32(length);
3612 		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3613 		qp->prev_wqe_size = swqe->wqe_size;
3614 		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3615 		break;
3616 
3617 	case IB_WR_RDMA_WRITE_WITH_IMM:
3618 		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3619 			rc = -EINVAL;
3620 			*bad_wr = wr;
3621 			break;
3622 		}
3623 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3624 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3625 
3626 		rwqe->wqe_size = 2;
3627 		rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3628 		rwqe2 = qed_chain_produce(&qp->sq.pbl);
3629 		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3630 						   wr, bad_wr);
3631 		rwqe->length = cpu_to_le32(length);
3632 		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3633 		qp->prev_wqe_size = rwqe->wqe_size;
3634 		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3635 		break;
3636 	case IB_WR_RDMA_WRITE:
3637 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3638 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3639 
3640 		rwqe->wqe_size = 2;
3641 		rwqe2 = qed_chain_produce(&qp->sq.pbl);
3642 		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3643 						   wr, bad_wr);
3644 		rwqe->length = cpu_to_le32(length);
3645 		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3646 		qp->prev_wqe_size = rwqe->wqe_size;
3647 		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3648 		break;
3649 	case IB_WR_RDMA_READ_WITH_INV:
3650 		SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3651 		fallthrough;	/* handled identically to RDMA READ */
3652 
3653 	case IB_WR_RDMA_READ:
3654 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3655 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3656 
3657 		rwqe->wqe_size = 2;
3658 		rwqe2 = qed_chain_produce(&qp->sq.pbl);
3659 		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3660 						   wr, bad_wr);
3661 		rwqe->length = cpu_to_le32(length);
3662 		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3663 		qp->prev_wqe_size = rwqe->wqe_size;
3664 		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3665 		break;
3666 
3667 	case IB_WR_ATOMIC_CMP_AND_SWP:
3668 	case IB_WR_ATOMIC_FETCH_AND_ADD:
3669 		awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3670 		awqe1->wqe_size = 4;
3671 
3672 		awqe2 = qed_chain_produce(&qp->sq.pbl);
3673 		DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3674 		awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3675 
3676 		awqe3 = qed_chain_produce(&qp->sq.pbl);
3677 
3678 		if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3679 			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3680 			DMA_REGPAIR_LE(awqe3->swap_data,
3681 				       atomic_wr(wr)->compare_add);
3682 		} else {
3683 			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3684 			DMA_REGPAIR_LE(awqe3->swap_data,
3685 				       atomic_wr(wr)->swap);
3686 			DMA_REGPAIR_LE(awqe3->cmp_data,
3687 				       atomic_wr(wr)->compare_add);
3688 		}
3689 
3690 		qedr_prepare_sq_sges(qp, NULL, wr);
3691 
3692 		qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3693 		qp->prev_wqe_size = awqe1->wqe_size;
3694 		break;
3695 
3696 	case IB_WR_LOCAL_INV:
3697 		iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3698 		iwqe->wqe_size = 1;
3699 
3700 		iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3701 		iwqe->inv_l_key = wr->ex.invalidate_rkey;
3702 		qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3703 		qp->prev_wqe_size = iwqe->wqe_size;
3704 		break;
3705 	case IB_WR_REG_MR:
3706 		DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3707 		wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3708 		fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3709 		fwqe1->wqe_size = 2;
3710 
3711 		rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3712 		if (rc) {
3713 			DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3714 			*bad_wr = wr;
3715 			break;
3716 		}
3717 
3718 		qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3719 		qp->prev_wqe_size = fwqe1->wqe_size;
3720 		break;
3721 	default:
3722 		DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3723 		rc = -EINVAL;
3724 		*bad_wr = wr;
3725 		break;
3726 	}
3727 
3728 	if (*bad_wr) {
3729 		u16 value;
3730 
3731 		/* Restore prod to its position before
3732 		 * this WR was processed
3733 		 */
3734 		value = le16_to_cpu(qp->sq.db_data.data.value);
3735 		qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3736 
3737 		/* Restore prev_wqe_size */
3738 		qp->prev_wqe_size = wqe->prev_wqe_size;
3739 		rc = -EINVAL;
3740 		DP_ERR(dev, "POST SEND FAILED\n");
3741 	}
3742 
3743 	return rc;
3744 }
3745 
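/* qedr implementation of the post_send verb. GSI QPs are handled by
 * qedr_gsi_post_send(); for RoCE the QP must be in RTS, SQD or ERR state.
 * Each WR in the chain is posted via __qedr_post_send() and a single
 * doorbell is rung at the end, after an smp_wmb() that orders the
 * wqe_wr_id updates against a possible immediate completion.
 *
 * A kernel ULP would typically reach this through ib_post_send(), roughly
 * as in the illustrative sketch below (variable names are placeholders):
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
 *	struct ib_send_wr wr = { .opcode = IB_WR_SEND, .sg_list = &sge,
 *				 .num_sge = 1, .send_flags = IB_SEND_SIGNALED };
 *	const struct ib_send_wr *bad_wr;
 *	int rc = ib_post_send(ibqp, &wr, &bad_wr);
 */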
3746 int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3747 		   const struct ib_send_wr **bad_wr)
3748 {
3749 	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3750 	struct qedr_qp *qp = get_qedr_qp(ibqp);
3751 	unsigned long flags;
3752 	int rc = 0;
3753 
3754 	*bad_wr = NULL;
3755 
3756 	if (qp->qp_type == IB_QPT_GSI)
3757 		return qedr_gsi_post_send(ibqp, wr, bad_wr);
3758 
3759 	spin_lock_irqsave(&qp->q_lock, flags);
3760 
3761 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
3762 		if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3763 		    (qp->state != QED_ROCE_QP_STATE_ERR) &&
3764 		    (qp->state != QED_ROCE_QP_STATE_SQD)) {
3765 			spin_unlock_irqrestore(&qp->q_lock, flags);
3766 			*bad_wr = wr;
3767 			DP_DEBUG(dev, QEDR_MSG_CQ,
3768 				 "QP in wrong state! QP icid=0x%x state %d\n",
3769 				 qp->icid, qp->state);
3770 			return -EINVAL;
3771 		}
3772 	}
3773 
3774 	while (wr) {
3775 		rc = __qedr_post_send(ibqp, wr, bad_wr);
3776 		if (rc)
3777 			break;
3778 
3779 		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3780 
3781 		qedr_inc_sw_prod(&qp->sq);
3782 
3783 		qp->sq.db_data.data.value++;
3784 
3785 		wr = wr->next;
3786 	}
3787 
3788 	/* Trigger doorbell
3789 	 * If there was a failure in the first WR then the doorbell will be
3790 	 * rung in vain. However, this is not harmful (as long as the producer
3791 	 * value is unchanged). For performance reasons we avoid checking for
3792 	 * this redundant doorbell.
3793 	 *
3794 	 * qp->wqe_wr_id is accessed during qedr_poll_cq; as
3795 	 * soon as we ring the doorbell we could get a completion
3796 	 * for this WR, therefore we need to make sure that the
3797 	 * memory is updated before ringing the doorbell.
3798 	 * During qedr_poll_cq, rmb is called before accessing the
3799 	 * cqe, which covers the need for an smp_rmb as well.
3800 	 */
3801 	smp_wmb();
3802 	writel(qp->sq.db_data.raw, qp->sq.db);
3803 
3804 	spin_unlock_irqrestore(&qp->q_lock, flags);
3805 
3806 	return rc;
3807 }
3808 
3809 static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
3810 {
3811 	u32 used;
3812 
3813 	/* Calculate the number of elements used from the producer
3814 	 * and consumer counts and subtract it from the maximum
3815 	 * supported work requests to get the elements left.
3816 	 */
3817 	used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
3818 
3819 	return hw_srq->max_wr - used;
3820 }
3821 
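/* Post a chain of receive WRs to an SRQ. For each WR a header element and
 * one element per SGE are produced on the SRQ PBL; the SGE and WQE
 * producers are then published to hardware through the virt_prod_pair_addr
 * area, with dma_wmb() ensuring the SGE producer is visible first.
 */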
3822 int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
3823 		       const struct ib_recv_wr **bad_wr)
3824 {
3825 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
3826 	struct qedr_srq_hwq_info *hw_srq;
3827 	struct qedr_dev *dev = srq->dev;
3828 	struct qed_chain *pbl;
3829 	unsigned long flags;
3830 	int status = 0;
3831 	u32 num_sge;
3832 
3833 	spin_lock_irqsave(&srq->lock, flags);
3834 
3835 	hw_srq = &srq->hw_srq;
3836 	pbl = &srq->hw_srq.pbl;
3837 	while (wr) {
3838 		struct rdma_srq_wqe_header *hdr;
3839 		int i;
3840 
3841 		if (!qedr_srq_elem_left(hw_srq) ||
3842 		    wr->num_sge > srq->hw_srq.max_sges) {
3843 			DP_ERR(dev, "Can't post WR  (%d,%d) || (%d > %d)\n",
3844 			       hw_srq->wr_prod_cnt,
3845 			       atomic_read(&hw_srq->wr_cons_cnt),
3846 			       wr->num_sge, srq->hw_srq.max_sges);
3847 			status = -ENOMEM;
3848 			*bad_wr = wr;
3849 			break;
3850 		}
3851 
3852 		hdr = qed_chain_produce(pbl);
3853 		num_sge = wr->num_sge;
3854 		/* Set number of sge and work request id in header */
3855 		SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
3856 
3857 		srq->hw_srq.wr_prod_cnt++;
3858 		hw_srq->wqe_prod++;
3859 		hw_srq->sge_prod++;
3860 
3861 		DP_DEBUG(dev, QEDR_MSG_SRQ,
3862 			 "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
3863 			 wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
3864 
3865 		for (i = 0; i < wr->num_sge; i++) {
3866 			struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
3867 
3868 			/* Set SGE length, lkey and address */
3869 			SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
3870 				    wr->sg_list[i].length, wr->sg_list[i].lkey);
3871 
3872 			DP_DEBUG(dev, QEDR_MSG_SRQ,
3873 				 "[%d]: len %d key %x addr %x:%x\n",
3874 				 i, srq_sge->length, srq_sge->l_key,
3875 				 srq_sge->addr.hi, srq_sge->addr.lo);
3876 			hw_srq->sge_prod++;
3877 		}
3878 
3879 		/* Update WQE and SGE information before
3880 		 * updating producer.
3881 		 */
3882 		dma_wmb();
3883 
3884 		/* The SRQ producer is 8 bytes: the SGE producer index goes in
3885 		 * the first 4 bytes and the WQE producer goes in the next
3886 		 * 4 bytes.
3887 		 */
3888 		srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
3889 		/* Make sure sge producer is updated first */
3890 		dma_wmb();
3891 		srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);
3892 
3893 		wr = wr->next;
3894 	}
3895 
3896 	DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
3897 		 qed_chain_get_elem_left(pbl));
3898 	spin_unlock_irqrestore(&srq->lock, flags);
3899 
3900 	return status;
3901 }
3902 
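/* Post receive WRs to a regular (non-SRQ) QP. The first SGE of each RQE
 * carries the total SGE count; a WR with no SGEs is posted as a single
 * zero-length SGE because the FW expects 1-4 SGEs per RQE (e.g. an RDMA
 * write with immediate consumes an RQ element).
 */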
3903 int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3904 		   const struct ib_recv_wr **bad_wr)
3905 {
3906 	struct qedr_qp *qp = get_qedr_qp(ibqp);
3907 	struct qedr_dev *dev = qp->dev;
3908 	unsigned long flags;
3909 	int status = 0;
3910 
3911 	if (qp->qp_type == IB_QPT_GSI)
3912 		return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3913 
3914 	spin_lock_irqsave(&qp->q_lock, flags);
3915 
3916 	if (qp->state == QED_ROCE_QP_STATE_RESET) {
3917 		spin_unlock_irqrestore(&qp->q_lock, flags);
3918 		*bad_wr = wr;
3919 		return -EINVAL;
3920 	}
3921 
3922 	while (wr) {
3923 		int i;
3924 
3925 		if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3926 		    QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3927 		    wr->num_sge > qp->rq.max_sges) {
3928 			DP_ERR(dev, "Can't post WR  (%d < %d) || (%d > %d)\n",
3929 			       qed_chain_get_elem_left_u32(&qp->rq.pbl),
3930 			       QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3931 			       qp->rq.max_sges);
3932 			status = -ENOMEM;
3933 			*bad_wr = wr;
3934 			break;
3935 		}
3936 		for (i = 0; i < wr->num_sge; i++) {
3937 			u32 flags = 0;
3938 			struct rdma_rq_sge *rqe =
3939 			    qed_chain_produce(&qp->rq.pbl);
3940 
3941 			/* First one must include the number
3942 			 * of SGE in the list
3943 			 */
3944 			if (!i)
3945 				SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3946 					  wr->num_sge);
3947 
3948 			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
3949 				  wr->sg_list[i].lkey);
3950 
3951 			RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3952 				   wr->sg_list[i].length, flags);
3953 		}
3954 
3955 		/* Special case of no SGEs. The FW requires between 1-4 SGEs,
3956 		 * so in this case we post one SGE with length zero. This is
3957 		 * because an RDMA write with immediate consumes an RQ element.
3958 		 */
3959 		if (!wr->num_sge) {
3960 			u32 flags = 0;
3961 			struct rdma_rq_sge *rqe =
3962 			    qed_chain_produce(&qp->rq.pbl);
3963 
3964 			/* First one must include the number
3965 			 * of SGE in the list
3966 			 */
3967 			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
3968 			SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3969 
3970 			RQ_SGE_SET(rqe, 0, 0, flags);
3971 			i = 1;
3972 		}
3973 
3974 		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3975 		qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3976 
3977 		qedr_inc_sw_prod(&qp->rq);
3978 
3979 		/* qp->rqe_wr_id is accessed during qedr_poll_cq; as
3980 		 * soon as we ring the doorbell we could get a completion
3981 		 * for this WR, therefore we need to make sure that the
3982 		 * memory is updated before ringing the doorbell.
3983 		 * During qedr_poll_cq, rmb is called before accessing the
3984 		 * cqe, which covers the need for an smp_rmb as well.
3985 		 */
3986 		smp_wmb();
3987 
3988 		qp->rq.db_data.data.value++;
3989 
3990 		writel(qp->rq.db_data.raw, qp->rq.db);
3991 
3992 		if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3993 			writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3994 		}
3995 
3996 		wr = wr->next;
3997 	}
3998 
3999 	spin_unlock_irqrestore(&qp->q_lock, flags);
4000 
4001 	return status;
4002 }
4003 
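/* A CQE is valid (owned by software) when its toggle bit matches the CQ's
 * current PBL toggle value.
 */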
4004 static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
4005 {
4006 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
4007 
4008 	return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
4009 		cq->pbl_toggle;
4010 }
4011 
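/* Recover the qedr_qp that produced this CQE from the qp_handle hi/lo
 * pair, which carries the host QP pointer.
 */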
4012 static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
4013 {
4014 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
4015 	struct qedr_qp *qp;
4016 
4017 	qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
4018 						   resp_cqe->qp_handle.lo,
4019 						   u64);
4020 	return qp;
4021 }
4022 
4023 static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
4024 {
4025 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
4026 
4027 	return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
4028 }
4029 
4030 /* Return latest CQE (needs processing) */
4031 static union rdma_cqe *get_cqe(struct qedr_cq *cq)
4032 {
4033 	return cq->latest_cqe;
4034 }
4035 
4036 /* For FMR we need to increase the completed counter used by the FMR
4037  * algorithm that determines whether a PBL can be freed or not.
4038  * We need to do this whether the work request was signaled or not. For
4039  * this purpose we call this function from the condition that checks if a WR
4040  * should be skipped, to make sure we don't miss it (possibly this FMR
4041  * operation was not signaled).
4042  */
4043 static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
4044 {
4045 	if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
4046 		qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4047 }
4048 
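/* Walk the SQ from the current software consumer up to hw_cons and generate
 * at most num_entries work completions with the given status. Unsignaled
 * WRs are skipped unless @force is set (used when flushing), but REG_MR
 * completions are still accounted via qedr_chk_if_fmr().
 */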
4049 static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
4050 		       struct qedr_cq *cq, int num_entries,
4051 		       struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
4052 		       int force)
4053 {
4054 	u16 cnt = 0;
4055 
4056 	while (num_entries && qp->sq.wqe_cons != hw_cons) {
4057 		if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
4058 			qedr_chk_if_fmr(qp);
4059 			/* skip WC */
4060 			goto next_cqe;
4061 		}
4062 
4063 		/* fill WC */
4064 		wc->status = status;
4065 		wc->vendor_err = 0;
4066 		wc->wc_flags = 0;
4067 		wc->src_qp = qp->id;
4068 		wc->qp = &qp->ibqp;
4069 
4070 		wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
4071 		wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
4072 
4073 		switch (wc->opcode) {
4074 		case IB_WC_RDMA_WRITE:
4075 			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4076 			break;
4077 		case IB_WC_COMP_SWAP:
4078 		case IB_WC_FETCH_ADD:
4079 			wc->byte_len = 8;
4080 			break;
4081 		case IB_WC_REG_MR:
4082 			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4083 			break;
4084 		case IB_WC_RDMA_READ:
4085 		case IB_WC_SEND:
4086 			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4087 			break;
4088 		default:
4089 			break;
4090 		}
4091 
4092 		num_entries--;
4093 		wc++;
4094 		cnt++;
4095 next_cqe:
4096 		while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
4097 			qed_chain_consume(&qp->sq.pbl);
4098 		qedr_inc_sw_cons(&qp->sq);
4099 	}
4100 
4101 	return cnt;
4102 }
4103 
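/* Handle a requester CQE. A good or flushed status completes WRs up to the
 * reported SQ consumer; any other status completes the preceding WRs as
 * successful, moves the QP to the error state and, if room remains, emits
 * one WC carrying the translated error status.
 */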
4104 static int qedr_poll_cq_req(struct qedr_dev *dev,
4105 			    struct qedr_qp *qp, struct qedr_cq *cq,
4106 			    int num_entries, struct ib_wc *wc,
4107 			    struct rdma_cqe_requester *req)
4108 {
4109 	int cnt = 0;
4110 
4111 	switch (req->status) {
4112 	case RDMA_CQE_REQ_STS_OK:
4113 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4114 				  IB_WC_SUCCESS, 0);
4115 		break;
4116 	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
4117 		if (qp->state != QED_ROCE_QP_STATE_ERR)
4118 			DP_DEBUG(dev, QEDR_MSG_CQ,
4119 				 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4120 				 cq->icid, qp->icid);
4121 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4122 				  IB_WC_WR_FLUSH_ERR, 1);
4123 		break;
4124 	default:
4125 		/* process all WQEs before the consumer */
4126 		qp->state = QED_ROCE_QP_STATE_ERR;
4127 		cnt = process_req(dev, qp, cq, num_entries, wc,
4128 				  req->sq_cons - 1, IB_WC_SUCCESS, 0);
4129 		wc += cnt;
4130 		/* if we have extra WC fill it with actual error info */
4131 		if (cnt < num_entries) {
4132 			enum ib_wc_status wc_status;
4133 
4134 			switch (req->status) {
4135 			case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
4136 				DP_ERR(dev,
4137 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4138 				       cq->icid, qp->icid);
4139 				wc_status = IB_WC_BAD_RESP_ERR;
4140 				break;
4141 			case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
4142 				DP_ERR(dev,
4143 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4144 				       cq->icid, qp->icid);
4145 				wc_status = IB_WC_LOC_LEN_ERR;
4146 				break;
4147 			case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
4148 				DP_ERR(dev,
4149 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4150 				       cq->icid, qp->icid);
4151 				wc_status = IB_WC_LOC_QP_OP_ERR;
4152 				break;
4153 			case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
4154 				DP_ERR(dev,
4155 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4156 				       cq->icid, qp->icid);
4157 				wc_status = IB_WC_LOC_PROT_ERR;
4158 				break;
4159 			case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
4160 				DP_ERR(dev,
4161 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4162 				       cq->icid, qp->icid);
4163 				wc_status = IB_WC_MW_BIND_ERR;
4164 				break;
4165 			case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
4166 				DP_ERR(dev,
4167 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4168 				       cq->icid, qp->icid);
4169 				wc_status = IB_WC_REM_INV_REQ_ERR;
4170 				break;
4171 			case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
4172 				DP_ERR(dev,
4173 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4174 				       cq->icid, qp->icid);
4175 				wc_status = IB_WC_REM_ACCESS_ERR;
4176 				break;
4177 			case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
4178 				DP_ERR(dev,
4179 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4180 				       cq->icid, qp->icid);
4181 				wc_status = IB_WC_REM_OP_ERR;
4182 				break;
4183 			case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
4184 				DP_ERR(dev,
4185 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4186 				       cq->icid, qp->icid);
4187 				wc_status = IB_WC_RNR_RETRY_EXC_ERR;
4188 				break;
4189 			case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
4190 				DP_ERR(dev,
4191 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4192 				       cq->icid, qp->icid);
4193 				wc_status = IB_WC_RETRY_EXC_ERR;
4194 				break;
4195 			default:
4196 				DP_ERR(dev,
4197 				       "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4198 				       cq->icid, qp->icid);
4199 				wc_status = IB_WC_GENERAL_ERR;
4200 			}
4201 			cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
4202 					   wc_status, 1);
4203 		}
4204 	}
4205 
4206 	return cnt;
4207 }
4208 
4209 static inline int qedr_cqe_resp_status_to_ib(u8 status)
4210 {
4211 	switch (status) {
4212 	case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
4213 		return IB_WC_LOC_ACCESS_ERR;
4214 	case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
4215 		return IB_WC_LOC_LEN_ERR;
4216 	case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
4217 		return IB_WC_LOC_QP_OP_ERR;
4218 	case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
4219 		return IB_WC_LOC_PROT_ERR;
4220 	case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
4221 		return IB_WC_MW_BIND_ERR;
4222 	case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
4223 		return IB_WC_REM_INV_RD_REQ_ERR;
4224 	case RDMA_CQE_RESP_STS_OK:
4225 		return IB_WC_SUCCESS;
4226 	default:
4227 		return IB_WC_GENERAL_ERR;
4228 	}
4229 }
4230 
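/* Fill a successful responder WC from the CQE, decoding the immediate-data
 * and invalidated-rkey flags. Returns -EINVAL for flag combinations the
 * CQE should never carry (e.g. immediate together with invalidate).
 */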
4231 static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
4232 					  struct ib_wc *wc)
4233 {
4234 	wc->status = IB_WC_SUCCESS;
4235 	wc->byte_len = le32_to_cpu(resp->length);
4236 
4237 	if (resp->flags & QEDR_RESP_IMM) {
4238 		wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
4239 		wc->wc_flags |= IB_WC_WITH_IMM;
4240 
4241 		if (resp->flags & QEDR_RESP_RDMA)
4242 			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
4243 
4244 		if (resp->flags & QEDR_RESP_INV)
4245 			return -EINVAL;
4246 
4247 	} else if (resp->flags & QEDR_RESP_INV) {
4248 		wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
4249 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4250 
4251 		if (resp->flags & QEDR_RESP_RDMA)
4252 			return -EINVAL;
4253 
4254 	} else if (resp->flags & QEDR_RESP_RDMA) {
4255 		return -EINVAL;
4256 	}
4257 
4258 	return 0;
4259 }
4260 
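/* Responder WC construction shared by the SRQ and RQ paths; the caller
 * supplies the wr_id since the two paths recover it differently.
 */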
4261 static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4262 			       struct qedr_cq *cq, struct ib_wc *wc,
4263 			       struct rdma_cqe_responder *resp, u64 wr_id)
4264 {
4265 	/* Must fill fields before qedr_set_ok_cqe_resp_wc() */
4266 	wc->opcode = IB_WC_RECV;
4267 	wc->wc_flags = 0;
4268 
4269 	if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
4270 		if (qedr_set_ok_cqe_resp_wc(resp, wc))
4271 			DP_ERR(dev,
4272 			       "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
4273 			       cq, cq->icid, resp->flags);
4274 
4275 	} else {
4276 		wc->status = qedr_cqe_resp_status_to_ib(resp->status);
4277 		if (wc->status == IB_WC_GENERAL_ERR)
4278 			DP_ERR(dev,
4279 			       "CQ %p (icid=%d) contains an invalid CQE status %d\n",
4280 			       cq, cq->icid, resp->status);
4281 	}
4282 
4283 	/* Fill the rest of the WC */
4284 	wc->vendor_err = 0;
4285 	wc->src_qp = qp->id;
4286 	wc->qp = &qp->ibqp;
4287 	wc->wr_id = wr_id;
4288 }
4289 
4290 static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4291 				struct qedr_cq *cq, struct ib_wc *wc,
4292 				struct rdma_cqe_responder *resp)
4293 {
4294 	struct qedr_srq *srq = qp->srq;
4295 	u64 wr_id;
4296 
4297 	wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
4298 			 le32_to_cpu(resp->srq_wr_id.lo), u64);
4299 
4300 	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4301 		wc->status = IB_WC_WR_FLUSH_ERR;
4302 		wc->vendor_err = 0;
4303 		wc->wr_id = wr_id;
4304 		wc->byte_len = 0;
4305 		wc->src_qp = qp->id;
4306 		wc->qp = &qp->ibqp;
4307 		wc->wr_id = wr_id;
4308 	} else {
4309 		__process_resp_one(dev, qp, cq, wc, resp, wr_id);
4310 	}
4311 	atomic_inc(&srq->hw_srq.wr_cons_cnt);
4312 
4313 	return 1;
4314 }
4315 static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4316 			    struct qedr_cq *cq, struct ib_wc *wc,
4317 			    struct rdma_cqe_responder *resp)
4318 {
4319 	u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4320 
4321 	__process_resp_one(dev, qp, cq, wc, resp, wr_id);
4322 
4323 	while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4324 		qed_chain_consume(&qp->rq.pbl);
4325 	qedr_inc_sw_cons(&qp->rq);
4326 
4327 	return 1;
4328 }
4329 
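/* Flush the RQ: generate IB_WC_WR_FLUSH_ERR completions for every posted
 * RQE between the software consumer and hw_cons, consuming the RQ PBL
 * elements as we go.
 */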
4330 static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
4331 			      int num_entries, struct ib_wc *wc, u16 hw_cons)
4332 {
4333 	u16 cnt = 0;
4334 
4335 	while (num_entries && qp->rq.wqe_cons != hw_cons) {
4336 		/* fill WC */
4337 		wc->status = IB_WC_WR_FLUSH_ERR;
4338 		wc->vendor_err = 0;
4339 		wc->wc_flags = 0;
4340 		wc->src_qp = qp->id;
4341 		wc->byte_len = 0;
4342 		wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4343 		wc->qp = &qp->ibqp;
4344 		num_entries--;
4345 		wc++;
4346 		cnt++;
4347 		while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4348 			qed_chain_consume(&qp->rq.pbl);
4349 		qedr_inc_sw_cons(&qp->rq);
4350 	}
4351 
4352 	return cnt;
4353 }
4354 
4355 static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4356 				 struct rdma_cqe_responder *resp, int *update)
4357 {
4358 	if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
4359 		consume_cqe(cq);
4360 		*update |= 1;
4361 	}
4362 }
4363 
4364 static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4365 				 struct qedr_cq *cq, int num_entries,
4366 				 struct ib_wc *wc,
4367 				 struct rdma_cqe_responder *resp)
4368 {
4369 	int cnt;
4370 
4371 	cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
4372 	consume_cqe(cq);
4373 
4374 	return cnt;
4375 }
4376 
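/* Handle a responder CQE for a non-SRQ QP: a flushed status drains the RQ
 * up to the reported consumer, otherwise a single receive completion is
 * generated and the CQE is consumed.
 */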
4377 static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
4378 			     struct qedr_cq *cq, int num_entries,
4379 			     struct ib_wc *wc, struct rdma_cqe_responder *resp,
4380 			     int *update)
4381 {
4382 	int cnt;
4383 
4384 	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4385 		cnt = process_resp_flush(qp, cq, num_entries, wc,
4386 					 resp->rq_cons_or_srq_id);
4387 		try_consume_resp_cqe(cq, qp, resp, update);
4388 	} else {
4389 		cnt = process_resp_one(dev, qp, cq, wc, resp);
4390 		consume_cqe(cq);
4391 		*update |= 1;
4392 	}
4393 
4394 	return cnt;
4395 }
4396 
4397 static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4398 				struct rdma_cqe_requester *req, int *update)
4399 {
4400 	if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
4401 		consume_cqe(cq);
4402 		*update |= 1;
4403 	}
4404 }
4405 
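/* qedr implementation of the poll_cq verb. CQEs are processed while their
 * toggle bit is valid and dispatched by type (requester, responder,
 * responder-SRQ); the CQ consumer is advanced by the number of PBL elements
 * consumed and the CQ doorbell is updated once at the end if anything was
 * consumed.
 */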
4406 int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
4407 {
4408 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
4409 	struct qedr_cq *cq = get_qedr_cq(ibcq);
4410 	union rdma_cqe *cqe;
4411 	u32 old_cons, new_cons;
4412 	unsigned long flags;
4413 	int update = 0;
4414 	int done = 0;
4415 
4416 	if (cq->destroyed) {
4417 		DP_ERR(dev,
4418 		       "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
4419 		       cq, cq->icid);
4420 		return 0;
4421 	}
4422 
4423 	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
4424 		return qedr_gsi_poll_cq(ibcq, num_entries, wc);
4425 
4426 	spin_lock_irqsave(&cq->cq_lock, flags);
4427 	cqe = cq->latest_cqe;
4428 	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4429 	while (num_entries && is_valid_cqe(cq, cqe)) {
4430 		struct qedr_qp *qp;
4431 		int cnt = 0;
4432 
4433 		/* prevent speculative reads of any field of CQE */
4434 		rmb();
4435 
4436 		qp = cqe_get_qp(cqe);
4437 		if (!qp) {
4438 			WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
4439 			break;
4440 		}
4441 
4442 		wc->qp = &qp->ibqp;
4443 
4444 		switch (cqe_get_type(cqe)) {
4445 		case RDMA_CQE_TYPE_REQUESTER:
4446 			cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
4447 					       &cqe->req);
4448 			try_consume_req_cqe(cq, qp, &cqe->req, &update);
4449 			break;
4450 		case RDMA_CQE_TYPE_RESPONDER_RQ:
4451 			cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
4452 						&cqe->resp, &update);
4453 			break;
4454 		case RDMA_CQE_TYPE_RESPONDER_SRQ:
4455 			cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
4456 						    wc, &cqe->resp);
4457 			update = 1;
4458 			break;
4459 		case RDMA_CQE_TYPE_INVALID:
4460 		default:
4461 			DP_ERR(dev, "Error: invalid CQE type = %d\n",
4462 			       cqe_get_type(cqe));
4463 		}
4464 		num_entries -= cnt;
4465 		wc += cnt;
4466 		done += cnt;
4467 
4468 		cqe = get_cqe(cq);
4469 	}
4470 	new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4471 
4472 	cq->cq_cons += new_cons - old_cons;
4473 
4474 	if (update)
4475 		/* doorbell notifies about the latest VALID entry,
4476 		 * but the chain already points to the next INVALID one
4477 		 */
4478 		doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
4479 
4480 	spin_unlock_irqrestore(&cq->cq_lock, flags);
4481 	return done;
4482 }
4483 
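/* qedr does not implement device-specific MAD handling; this stub simply
 * returns IB_MAD_RESULT_SUCCESS.
 */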
4484 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
4485 		     u8 port_num, const struct ib_wc *in_wc,
4486 		     const struct ib_grh *in_grh, const struct ib_mad *in,
4487 		     struct ib_mad *out_mad, size_t *out_mad_size,
4488 		     u16 *out_mad_pkey_index)
4489 {
4490 	return IB_MAD_RESULT_SUCCESS;
4491 }
4492