xref: /openbmc/linux/drivers/infiniband/hw/qedr/verbs.c (revision 5388b581)
1 /* QLogic qedr NIC Driver
2  * Copyright (c) 2015-2016  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and /or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
34 #include <net/ip.h>
35 #include <net/ipv6.h>
36 #include <net/udp.h>
37 #include <linux/iommu.h>
38 
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
45 #include <rdma/uverbs_ioctl.h>
46 
47 #include <linux/qed/common_hsi.h>
48 #include "qedr_hsi_rdma.h"
49 #include <linux/qed/qed_if.h>
50 #include "qedr.h"
51 #include "verbs.h"
52 #include <rdma/qedr-abi.h>
53 #include "qedr_roce_cm.h"
54 #include "qedr_iw_cm.h"
55 
56 #define QEDR_SRQ_WQE_ELEM_SIZE	sizeof(union rdma_srq_elm)
57 #define	RDMA_MAX_SGE_PER_SRQ	(4)
58 #define RDMA_MAX_SRQ_WQE_SIZE	(RDMA_MAX_SGE_PER_SRQ + 1)
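/* A single SRQ WQE occupies at most RDMA_MAX_SRQ_WQE_SIZE chain elements:
 * up to RDMA_MAX_SGE_PER_SRQ SGE elements plus (presumably) one element
 * for the WQE header, hence the "+ 1".
 */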
59 
60 #define DB_ADDR_SHIFT(addr)		((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
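/* DB_ADDR_SHIFT() scales a DQ PWM doorbell offset into a byte offset inside
 * the doorbell space: the kernel adds it to dev->db_addr, while user space
 * adds it to its mapped DPI base (see the CQ/QP creation paths below).
 */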
61 
62 enum {
63 	QEDR_USER_MMAP_IO_WC = 0,
64 	QEDR_USER_MMAP_PHYS_PAGE,
65 };
66 
67 static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
68 					size_t len)
69 {
70 	size_t min_len = min_t(size_t, len, udata->outlen);
71 
72 	return ib_copy_to_udata(udata, src, min_len);
73 }
74 
75 int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
76 {
77 	if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
78 		return -EINVAL;
79 
80 	*pkey = QEDR_ROCE_PKEY_DEFAULT;
81 	return 0;
82 }
83 
84 int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
85 		      int index, union ib_gid *sgid)
86 {
87 	struct qedr_dev *dev = get_qedr_dev(ibdev);
88 
89 	memset(sgid->raw, 0, sizeof(sgid->raw));
90 	ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
91 
92 	DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
93 		 sgid->global.interface_id, sgid->global.subnet_prefix);
94 
95 	return 0;
96 }
97 
98 int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
99 {
100 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
101 	struct qedr_device_attr *qattr = &dev->attr;
102 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
103 
104 	srq_attr->srq_limit = srq->srq_limit;
105 	srq_attr->max_wr = qattr->max_srq_wr;
106 	srq_attr->max_sge = qattr->max_sge;
107 
108 	return 0;
109 }
110 
111 int qedr_query_device(struct ib_device *ibdev,
112 		      struct ib_device_attr *attr, struct ib_udata *udata)
113 {
114 	struct qedr_dev *dev = get_qedr_dev(ibdev);
115 	struct qedr_device_attr *qattr = &dev->attr;
116 
117 	if (!dev->rdma_ctx) {
118 		DP_ERR(dev,
119 		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
120 		       dev->rdma_ctx);
121 		return -EINVAL;
122 	}
123 
124 	memset(attr, 0, sizeof(*attr));
125 
126 	attr->fw_ver = qattr->fw_ver;
127 	attr->sys_image_guid = qattr->sys_image_guid;
128 	attr->max_mr_size = qattr->max_mr_size;
129 	attr->page_size_cap = qattr->page_size_caps;
130 	attr->vendor_id = qattr->vendor_id;
131 	attr->vendor_part_id = qattr->vendor_part_id;
132 	attr->hw_ver = qattr->hw_ver;
133 	attr->max_qp = qattr->max_qp;
134 	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
135 	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
136 	    IB_DEVICE_RC_RNR_NAK_GEN |
137 	    IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
138 
139 	attr->max_send_sge = qattr->max_sge;
140 	attr->max_recv_sge = qattr->max_sge;
141 	attr->max_sge_rd = qattr->max_sge;
142 	attr->max_cq = qattr->max_cq;
143 	attr->max_cqe = qattr->max_cqe;
144 	attr->max_mr = qattr->max_mr;
145 	attr->max_mw = qattr->max_mw;
146 	attr->max_pd = qattr->max_pd;
147 	attr->atomic_cap = dev->atomic_cap;
148 	attr->max_fmr = qattr->max_fmr;
149 	attr->max_map_per_fmr = 16;
150 	attr->max_qp_init_rd_atom =
151 	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
152 	attr->max_qp_rd_atom =
153 	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
154 		attr->max_qp_init_rd_atom);
155 
156 	attr->max_srq = qattr->max_srq;
157 	attr->max_srq_sge = qattr->max_srq_sge;
158 	attr->max_srq_wr = qattr->max_srq_wr;
159 
160 	attr->local_ca_ack_delay = qattr->dev_ack_delay;
161 	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
162 	attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
163 	attr->max_ah = qattr->max_ah;
164 
165 	return 0;
166 }
167 
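/* Map an Ethernet link speed (in Mbps) to the closest IB speed/width pair,
 * roughly speed ~= per-lane IB rate * width (e.g. 100000 -> EDR (25G) x4,
 * 40000 -> QDR (10G) x4). Unsupported speeds fall back to SDR x1.
 */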
168 static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
169 					    u8 *ib_width)
170 {
171 	switch (speed) {
172 	case 1000:
173 		*ib_speed = IB_SPEED_SDR;
174 		*ib_width = IB_WIDTH_1X;
175 		break;
176 	case 10000:
177 		*ib_speed = IB_SPEED_QDR;
178 		*ib_width = IB_WIDTH_1X;
179 		break;
180 
181 	case 20000:
182 		*ib_speed = IB_SPEED_DDR;
183 		*ib_width = IB_WIDTH_4X;
184 		break;
185 
186 	case 25000:
187 		*ib_speed = IB_SPEED_EDR;
188 		*ib_width = IB_WIDTH_1X;
189 		break;
190 
191 	case 40000:
192 		*ib_speed = IB_SPEED_QDR;
193 		*ib_width = IB_WIDTH_4X;
194 		break;
195 
196 	case 50000:
197 		*ib_speed = IB_SPEED_HDR;
198 		*ib_width = IB_WIDTH_1X;
199 		break;
200 
201 	case 100000:
202 		*ib_speed = IB_SPEED_EDR;
203 		*ib_width = IB_WIDTH_4X;
204 		break;
205 
206 	default:
207 		/* Unsupported */
208 		*ib_speed = IB_SPEED_SDR;
209 		*ib_width = IB_WIDTH_1X;
210 	}
211 }
212 
213 int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
214 {
215 	struct qedr_dev *dev;
216 	struct qed_rdma_port *rdma_port;
217 
218 	dev = get_qedr_dev(ibdev);
219 
220 	if (!dev->rdma_ctx) {
221 		DP_ERR(dev, "rdma_ctx is NULL\n");
222 		return -EINVAL;
223 	}
224 
225 	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
226 
227 	/* *attr is zeroed by the caller; avoid zeroing it here */
228 	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
229 		attr->state = IB_PORT_ACTIVE;
230 		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
231 	} else {
232 		attr->state = IB_PORT_DOWN;
233 		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
234 	}
235 	attr->max_mtu = IB_MTU_4096;
236 	attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
237 	attr->lid = 0;
238 	attr->lmc = 0;
239 	attr->sm_lid = 0;
240 	attr->sm_sl = 0;
241 	attr->ip_gids = true;
242 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
243 		attr->gid_tbl_len = 1;
244 		attr->pkey_tbl_len = 1;
245 	} else {
246 		attr->gid_tbl_len = QEDR_MAX_SGID;
247 		attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
248 	}
249 	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
250 	attr->qkey_viol_cntr = 0;
251 	get_link_speed_and_width(rdma_port->link_speed,
252 				 &attr->active_speed, &attr->active_width);
253 	attr->max_msg_sz = rdma_port->max_msg_size;
254 	attr->max_vl_num = 4;
255 
256 	return 0;
257 }
258 
259 int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
260 {
261 	struct ib_device *ibdev = uctx->device;
262 	int rc;
263 	struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
264 	struct qedr_alloc_ucontext_resp uresp = {};
265 	struct qedr_alloc_ucontext_req ureq = {};
266 	struct qedr_dev *dev = get_qedr_dev(ibdev);
267 	struct qed_rdma_add_user_out_params oparams;
268 	struct qedr_user_mmap_entry *entry;
269 
270 	if (!udata)
271 		return -EFAULT;
272 
273 	if (udata->inlen) {
274 		rc = ib_copy_from_udata(&ureq, udata,
275 					min(sizeof(ureq), udata->inlen));
276 		if (rc) {
277 			DP_ERR(dev, "Problem copying data from user space\n");
278 			return -EFAULT;
279 		}
280 
281 		ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
282 	}
283 
284 	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
285 	if (rc) {
286 		DP_ERR(dev,
287 		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size, or closing unnecessary RoCE applications. To increase the number of DPIs, consult the qedr readme\n",
288 		       rc);
289 		return rc;
290 	}
291 
292 	ctx->dpi = oparams.dpi;
293 	ctx->dpi_addr = oparams.dpi_addr;
294 	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
295 	ctx->dpi_size = oparams.dpi_size;
296 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
297 	if (!entry) {
298 		rc = -ENOMEM;
299 		goto err;
300 	}
301 
302 	entry->io_address = ctx->dpi_phys_addr;
303 	entry->length = ctx->dpi_size;
304 	entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
305 	entry->dpi = ctx->dpi;
306 	entry->dev = dev;
307 	rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
308 					 ctx->dpi_size);
309 	if (rc) {
310 		kfree(entry);
311 		goto err;
312 	}
313 	ctx->db_mmap_entry = &entry->rdma_entry;
314 
315 	uresp.dpm_enabled = dev->user_dpm_enabled;
316 	uresp.wids_enabled = 1;
317 	uresp.wid_count = oparams.wid_count;
318 	uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
319 	uresp.db_size = ctx->dpi_size;
320 	uresp.max_send_wr = dev->attr.max_sqe;
321 	uresp.max_recv_wr = dev->attr.max_rqe;
322 	uresp.max_srq_wr = dev->attr.max_srq_wr;
323 	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
324 	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
325 	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
326 	uresp.max_cqes = QEDR_MAX_CQES;
327 
328 	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
329 	if (rc)
330 		goto err;
331 
332 	ctx->dev = dev;
333 
334 	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
335 		 &ctx->ibucontext);
336 	return 0;
337 
338 err:
339 	if (!ctx->db_mmap_entry)
340 		dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
341 	else
342 		rdma_user_mmap_entry_remove(ctx->db_mmap_entry);
343 
344 	return rc;
345 }
346 
347 void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
348 {
349 	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
350 
351 	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
352 		 uctx);
353 
354 	rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
355 }
356 
357 void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
358 {
359 	struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
360 	struct qedr_dev *dev = entry->dev;
361 
362 	if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
363 		free_page((unsigned long)entry->address);
364 	else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
365 		dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);
366 
367 	kfree(entry);
368 }
369 
370 int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
371 {
372 	struct ib_device *dev = ucontext->device;
373 	size_t length = vma->vm_end - vma->vm_start;
374 	struct rdma_user_mmap_entry *rdma_entry;
375 	struct qedr_user_mmap_entry *entry;
376 	int rc = 0;
377 	u64 pfn;
378 
379 	ibdev_dbg(dev,
380 		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
381 		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
382 
383 	rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
384 	if (!rdma_entry) {
385 		ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
386 			  vma->vm_pgoff);
387 		return -EINVAL;
388 	}
389 	entry = get_qedr_mmap_entry(rdma_entry);
390 	ibdev_dbg(dev,
391 		  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
392 		  entry->io_address, length, entry->mmap_flag);
393 
394 	switch (entry->mmap_flag) {
395 	case QEDR_USER_MMAP_IO_WC:
396 		pfn = entry->io_address >> PAGE_SHIFT;
397 		rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
398 				       pgprot_writecombine(vma->vm_page_prot),
399 				       rdma_entry);
400 		break;
401 	case QEDR_USER_MMAP_PHYS_PAGE:
402 		rc = vm_insert_page(vma, vma->vm_start,
403 				    virt_to_page(entry->address));
404 		break;
405 	default:
406 		rc = -EINVAL;
407 	}
408 
409 	if (rc)
410 		ibdev_dbg(dev,
411 			  "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
412 			  entry->io_address, length, entry->mmap_flag, rc);
413 
414 	rdma_user_mmap_entry_put(rdma_entry);
415 	return rc;
416 }
417 
418 int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
419 {
420 	struct ib_device *ibdev = ibpd->device;
421 	struct qedr_dev *dev = get_qedr_dev(ibdev);
422 	struct qedr_pd *pd = get_qedr_pd(ibpd);
423 	u16 pd_id;
424 	int rc;
425 
426 	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
427 		 udata ? "User Lib" : "Kernel");
428 
429 	if (!dev->rdma_ctx) {
430 		DP_ERR(dev, "invalid RDMA context\n");
431 		return -EINVAL;
432 	}
433 
434 	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
435 	if (rc)
436 		return rc;
437 
438 	pd->pd_id = pd_id;
439 
440 	if (udata) {
441 		struct qedr_alloc_pd_uresp uresp = {
442 			.pd_id = pd_id,
443 		};
444 		struct qedr_ucontext *context = rdma_udata_to_drv_context(
445 			udata, struct qedr_ucontext, ibucontext);
446 
447 		rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
448 		if (rc) {
449 			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
450 			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
451 			return rc;
452 		}
453 
454 		pd->uctx = context;
455 		pd->uctx->pd = pd;
456 	}
457 
458 	return 0;
459 }
460 
461 void qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
462 {
463 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
464 	struct qedr_pd *pd = get_qedr_pd(ibpd);
465 
466 	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
467 	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
468 }
469 
470 static void qedr_free_pbl(struct qedr_dev *dev,
471 			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
472 {
473 	struct pci_dev *pdev = dev->pdev;
474 	int i;
475 
476 	for (i = 0; i < pbl_info->num_pbls; i++) {
477 		if (!pbl[i].va)
478 			continue;
479 		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
480 				  pbl[i].va, pbl[i].pa);
481 	}
482 
483 	kfree(pbl);
484 }
485 
486 #define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
487 #define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
488 
489 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
490 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
491 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
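/* With 64KB FW PBL pages each page holds 64K / sizeof(u64) = 8192 PBEs, so a
 * two-layer PBL can describe up to 8192 * 8192 = ~67M pages (about 256GB of
 * registered memory assuming 4KB pages).
 */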
492 
493 static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
494 					   struct qedr_pbl_info *pbl_info,
495 					   gfp_t flags)
496 {
497 	struct pci_dev *pdev = dev->pdev;
498 	struct qedr_pbl *pbl_table;
499 	dma_addr_t *pbl_main_tbl;
500 	dma_addr_t pa;
501 	void *va;
502 	int i;
503 
504 	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
505 	if (!pbl_table)
506 		return ERR_PTR(-ENOMEM);
507 
508 	for (i = 0; i < pbl_info->num_pbls; i++) {
509 		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
510 					flags);
511 		if (!va)
512 			goto err;
513 
514 		pbl_table[i].va = va;
515 		pbl_table[i].pa = pa;
516 	}
517 
518 	/* Two-layer PBLs: if we have more than one PBL, we need to initialize
519 	 * the first one with physical pointers to all of the rest.
520 	 */
521 	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
522 	for (i = 0; i < pbl_info->num_pbls - 1; i++)
523 		pbl_main_tbl[i] = pbl_table[i + 1].pa;
524 
525 	return pbl_table;
526 
527 err:
528 	for (i--; i >= 0; i--)
529 		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
530 				  pbl_table[i].va, pbl_table[i].pa);
531 
532 	qedr_free_pbl(dev, pbl_info, pbl_table);
533 
534 	return ERR_PTR(-ENOMEM);
535 }
536 
537 static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
538 				struct qedr_pbl_info *pbl_info,
539 				u32 num_pbes, int two_layer_capable)
540 {
541 	u32 pbl_capacity;
542 	u32 pbl_size;
543 	u32 num_pbls;
544 
545 	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
546 		if (num_pbes > MAX_PBES_TWO_LAYER) {
547 			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
548 			       num_pbes);
549 			return -EINVAL;
550 		}
551 
552 		/* calculate required pbl page size */
553 		pbl_size = MIN_FW_PBL_PAGE_SIZE;
554 		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
555 			       NUM_PBES_ON_PAGE(pbl_size);
556 
557 		while (pbl_capacity < num_pbes) {
558 			pbl_size *= 2;
559 			pbl_capacity = pbl_size / sizeof(u64);
560 			pbl_capacity = pbl_capacity * pbl_capacity;
561 		}
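		/* Example: a 4KB pbl_size yields 512 * 512 = 256K addressable
		 * PBEs; each doubling of pbl_size quadruples the two-layer
		 * capacity, so the loop stops once num_pbes fits.
		 */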
562 
563 		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
564 		num_pbls++;	/* One for layer 0 (points to the PBLs) */
565 		pbl_info->two_layered = true;
566 	} else {
567 		/* One layered PBL */
568 		num_pbls = 1;
569 		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
570 				 roundup_pow_of_two((num_pbes * sizeof(u64))));
571 		pbl_info->two_layered = false;
572 	}
573 
574 	pbl_info->num_pbls = num_pbls;
575 	pbl_info->pbl_size = pbl_size;
576 	pbl_info->num_pbes = num_pbes;
577 
578 	DP_DEBUG(dev, QEDR_MSG_MR,
579 		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
580 		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
581 
582 	return 0;
583 }
584 
585 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
586 			       struct qedr_pbl *pbl,
587 			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
588 {
589 	int pbe_cnt, total_num_pbes = 0;
590 	u32 fw_pg_cnt, fw_pg_per_umem_pg;
591 	struct qedr_pbl *pbl_tbl;
592 	struct sg_dma_page_iter sg_iter;
593 	struct regpair *pbe;
594 	u64 pg_addr;
595 
596 	if (!pbl_info->num_pbes)
597 		return;
598 
599 	/* If we have a two-layered PBL, the first PBL points to the rest of
600 	 * the PBLs, and the first data entry lies in the second PBL of the table.
601 	 */
602 	if (pbl_info->two_layered)
603 		pbl_tbl = &pbl[1];
604 	else
605 		pbl_tbl = pbl;
606 
607 	pbe = (struct regpair *)pbl_tbl->va;
608 	if (!pbe) {
609 		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
610 		return;
611 	}
612 
613 	pbe_cnt = 0;
614 
615 	fw_pg_per_umem_pg = BIT(PAGE_SHIFT - pg_shift);
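	/* Each umem (kernel) page may be split into several FW-sized pages,
	 * e.g. with 64KB kernel pages and a 4KB FW page (pg_shift == 12) each
	 * umem page yields 16 PBEs; on 4KB-page systems the ratio is 1.
	 */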
616 
617 	for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
618 		pg_addr = sg_page_iter_dma_address(&sg_iter);
619 		for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
620 			pbe->lo = cpu_to_le32(pg_addr);
621 			pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
622 
623 			pg_addr += BIT(pg_shift);
624 			pbe_cnt++;
625 			total_num_pbes++;
626 			pbe++;
627 
628 			if (total_num_pbes == pbl_info->num_pbes)
629 				return;
630 
631 			/* If the given PBL is full of stored PBEs,
632 			 * move to the next PBL.
633 			 */
634 			if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
635 				pbl_tbl++;
636 				pbe = (struct regpair *)pbl_tbl->va;
637 				pbe_cnt = 0;
638 			}
639 
640 			fw_pg_cnt++;
641 		}
642 	}
643 }
644 
645 static int qedr_db_recovery_add(struct qedr_dev *dev,
646 				void __iomem *db_addr,
647 				void *db_data,
648 				enum qed_db_rec_width db_width,
649 				enum qed_db_rec_space db_space)
650 {
651 	if (!db_data) {
652 		DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
653 		return 0;
654 	}
655 
656 	return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
657 						 db_width, db_space);
658 }
659 
660 static void qedr_db_recovery_del(struct qedr_dev *dev,
661 				 void __iomem *db_addr,
662 				 void *db_data)
663 {
664 	if (!db_data) {
665 		DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
666 		return;
667 	}
668 
669 	/* Ignore return code as there is not much we can do about it. Error
670 	 * log will be printed inside.
671 	 */
672 	dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
673 }
674 
675 static int qedr_copy_cq_uresp(struct qedr_dev *dev,
676 			      struct qedr_cq *cq, struct ib_udata *udata,
677 			      u32 db_offset)
678 {
679 	struct qedr_create_cq_uresp uresp;
680 	int rc;
681 
682 	memset(&uresp, 0, sizeof(uresp));
683 
684 	uresp.db_offset = db_offset;
685 	uresp.icid = cq->icid;
686 	if (cq->q.db_mmap_entry)
687 		uresp.db_rec_addr =
688 			rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
689 
690 	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
691 	if (rc)
692 		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
693 
694 	return rc;
695 }
696 
697 static void consume_cqe(struct qedr_cq *cq)
698 {
699 	if (cq->latest_cqe == cq->toggle_cqe)
700 		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
701 
702 	cq->latest_cqe = qed_chain_consume(&cq->pbl);
703 }
704 
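/* Round the requested CQE count up so the ring fills whole pages. For example,
 * with a (presumed) 32-byte CQE and 4KB pages, a request for 512 entries
 * becomes ALIGN(513 * 32, 4096) / 32 = 640 entries (the extra entry below is
 * hidden from the FW).
 */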
705 static inline int qedr_align_cq_entries(int entries)
706 {
707 	u64 size, aligned_size;
708 
709 	/* We allocate an extra entry that we don't report to the FW. */
710 	size = (entries + 1) * QEDR_CQE_SIZE;
711 	aligned_size = ALIGN(size, PAGE_SIZE);
712 
713 	return aligned_size / QEDR_CQE_SIZE;
714 }
715 
716 static int qedr_init_user_db_rec(struct ib_udata *udata,
717 				 struct qedr_dev *dev, struct qedr_userq *q,
718 				 bool requires_db_rec)
719 {
720 	struct qedr_ucontext *uctx =
721 		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
722 					  ibucontext);
723 	struct qedr_user_mmap_entry *entry;
724 	int rc;
725 
726 	/* Abort for a non-doorbell user queue (SRQ) or a non-supporting lib */
727 	if (requires_db_rec == 0 || !uctx->db_rec)
728 		return 0;
729 
730 	/* Allocate a page for doorbell recovery, add to mmap */
731 	q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
732 	if (!q->db_rec_data) {
733 		DP_ERR(dev, "get_zeroed_page failed\n");
734 		return -ENOMEM;
735 	}
736 
737 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
738 	if (!entry)
739 		goto err_free_db_data;
740 
741 	entry->address = q->db_rec_data;
742 	entry->length = PAGE_SIZE;
743 	entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
744 	rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
745 					 &entry->rdma_entry,
746 					 PAGE_SIZE);
747 	if (rc)
748 		goto err_free_entry;
749 
750 	q->db_mmap_entry = &entry->rdma_entry;
751 
752 	return 0;
753 
754 err_free_entry:
755 	kfree(entry);
756 
757 err_free_db_data:
758 	free_page((unsigned long)q->db_rec_data);
759 	q->db_rec_data = NULL;
760 	return -ENOMEM;
761 }
762 
763 static inline int qedr_init_user_queue(struct ib_udata *udata,
764 				       struct qedr_dev *dev,
765 				       struct qedr_userq *q, u64 buf_addr,
766 				       size_t buf_len, bool requires_db_rec,
767 				       int access,
768 				       int alloc_and_init)
769 {
770 	u32 fw_pages;
771 	int rc;
772 
773 	q->buf_addr = buf_addr;
774 	q->buf_len = buf_len;
775 	q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access);
776 	if (IS_ERR(q->umem)) {
777 		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
778 		       PTR_ERR(q->umem));
779 		return PTR_ERR(q->umem);
780 	}
781 
782 	fw_pages = ib_umem_page_count(q->umem) <<
783 	    (PAGE_SHIFT - FW_PAGE_SHIFT);
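	/* fw_pages counts FW-sized (presumably 4KB) pages: each umem page is
	 * expanded by a factor of 2^(PAGE_SHIFT - FW_PAGE_SHIFT), which is 1
	 * on 4KB-page systems.
	 */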
784 
785 	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
786 	if (rc)
787 		goto err0;
788 
789 	if (alloc_and_init) {
790 		q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
791 		if (IS_ERR(q->pbl_tbl)) {
792 			rc = PTR_ERR(q->pbl_tbl);
793 			goto err0;
794 		}
795 		qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
796 				   FW_PAGE_SHIFT);
797 	} else {
798 		q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
799 		if (!q->pbl_tbl) {
800 			rc = -ENOMEM;
801 			goto err0;
802 		}
803 	}
804 
805 	/* mmap the user address used to store doorbell data for recovery */
806 	return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
807 
808 err0:
809 	ib_umem_release(q->umem);
810 	q->umem = NULL;
811 
812 	return rc;
813 }
814 
815 static inline void qedr_init_cq_params(struct qedr_cq *cq,
816 				       struct qedr_ucontext *ctx,
817 				       struct qedr_dev *dev, int vector,
818 				       int chain_entries, int page_cnt,
819 				       u64 pbl_ptr,
820 				       struct qed_rdma_create_cq_in_params
821 				       *params)
822 {
823 	memset(params, 0, sizeof(*params));
824 	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
825 	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
826 	params->cnq_id = vector;
827 	params->cq_size = chain_entries - 1;
828 	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
829 	params->pbl_num_pages = page_cnt;
830 	params->pbl_ptr = pbl_ptr;
831 	params->pbl_two_level = 0;
832 }
833 
834 static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
835 {
836 	cq->db.data.agg_flags = flags;
837 	cq->db.data.value = cpu_to_le32(cons);
838 	writeq(cq->db.raw, cq->db_addr);
839 }
840 
841 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
842 {
843 	struct qedr_cq *cq = get_qedr_cq(ibcq);
844 	unsigned long sflags;
845 	struct qedr_dev *dev;
846 
847 	dev = get_qedr_dev(ibcq->device);
848 
849 	if (cq->destroyed) {
850 		DP_ERR(dev,
851 		       "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
852 		       cq, cq->icid);
853 		return -EINVAL;
854 	}
855 
856 
857 	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
858 		return 0;
859 
860 	spin_lock_irqsave(&cq->cq_lock, sflags);
861 
862 	cq->arm_flags = 0;
863 
864 	if (flags & IB_CQ_SOLICITED)
865 		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
866 
867 	if (flags & IB_CQ_NEXT_COMP)
868 		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
869 
870 	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
871 
872 	spin_unlock_irqrestore(&cq->cq_lock, sflags);
873 
874 	return 0;
875 }
876 
877 int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
878 		   struct ib_udata *udata)
879 {
880 	struct ib_device *ibdev = ibcq->device;
881 	struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
882 		udata, struct qedr_ucontext, ibucontext);
883 	struct qed_rdma_destroy_cq_out_params destroy_oparams;
884 	struct qed_rdma_destroy_cq_in_params destroy_iparams;
885 	struct qedr_dev *dev = get_qedr_dev(ibdev);
886 	struct qed_rdma_create_cq_in_params params;
887 	struct qedr_create_cq_ureq ureq = {};
888 	int vector = attr->comp_vector;
889 	int entries = attr->cqe;
890 	struct qedr_cq *cq = get_qedr_cq(ibcq);
891 	int chain_entries;
892 	u32 db_offset;
893 	int page_cnt;
894 	u64 pbl_ptr;
895 	u16 icid;
896 	int rc;
897 
898 	DP_DEBUG(dev, QEDR_MSG_INIT,
899 		 "create_cq: called from %s. entries=%d, vector=%d\n",
900 		 udata ? "User Lib" : "Kernel", entries, vector);
901 
902 	if (entries > QEDR_MAX_CQES) {
903 		DP_ERR(dev,
904 		       "create cq: the number of entries %d is too high. Must be equal to or below %d.\n",
905 		       entries, QEDR_MAX_CQES);
906 		return -EINVAL;
907 	}
908 
909 	chain_entries = qedr_align_cq_entries(entries);
910 	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
911 
912 	/* Calculate the db offset: user space adds the DPI base, the kernel adds the db addr */
913 	db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
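	/* For a user CQ the final doorbell address is ctx->dpi_addr + db_offset
	 * (set below); for a kernel CQ it is dev->db_addr + db_offset.
	 */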
914 
915 	if (udata) {
916 		if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
917 							 udata->inlen))) {
918 			DP_ERR(dev,
919 			       "create cq: problem copying data from user space\n");
920 			goto err0;
921 		}
922 
923 		if (!ureq.len) {
924 			DP_ERR(dev,
925 			       "create cq: cannot create a cq with 0 entries\n");
926 			goto err0;
927 		}
928 
929 		cq->cq_type = QEDR_CQ_TYPE_USER;
930 
931 		rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
932 					  ureq.len, true, IB_ACCESS_LOCAL_WRITE,
933 					  1);
934 		if (rc)
935 			goto err0;
936 
937 		pbl_ptr = cq->q.pbl_tbl->pa;
938 		page_cnt = cq->q.pbl_info.num_pbes;
939 
940 		cq->ibcq.cqe = chain_entries;
941 		cq->q.db_addr = ctx->dpi_addr + db_offset;
942 	} else {
943 		cq->cq_type = QEDR_CQ_TYPE_KERNEL;
944 
945 		rc = dev->ops->common->chain_alloc(dev->cdev,
946 						   QED_CHAIN_USE_TO_CONSUME,
947 						   QED_CHAIN_MODE_PBL,
948 						   QED_CHAIN_CNT_TYPE_U32,
949 						   chain_entries,
950 						   sizeof(union rdma_cqe),
951 						   &cq->pbl, NULL);
952 		if (rc)
953 			goto err0;
954 
955 		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
956 		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
957 		cq->ibcq.cqe = cq->pbl.capacity;
958 	}
959 
960 	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
961 			    pbl_ptr, &params);
962 
963 	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
964 	if (rc)
965 		goto err1;
966 
967 	cq->icid = icid;
968 	cq->sig = QEDR_CQ_MAGIC_NUMBER;
969 	spin_lock_init(&cq->cq_lock);
970 
971 	if (udata) {
972 		rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
973 		if (rc)
974 			goto err2;
975 
976 		rc = qedr_db_recovery_add(dev, cq->q.db_addr,
977 					  &cq->q.db_rec_data->db_data,
978 					  DB_REC_WIDTH_64B,
979 					  DB_REC_USER);
980 		if (rc)
981 			goto err2;
982 
983 	} else {
984 		/* Generate doorbell address. */
985 		cq->db.data.icid = cq->icid;
986 		cq->db_addr = dev->db_addr + db_offset;
987 		cq->db.data.params = DB_AGG_CMD_SET <<
988 		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
989 
990 		/* Point to the very last element; once we pass it, we will toggle */
991 		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
992 		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
993 		cq->latest_cqe = NULL;
994 		consume_cqe(cq);
995 		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
996 
997 		rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
998 					  DB_REC_WIDTH_64B, DB_REC_KERNEL);
999 		if (rc)
1000 			goto err2;
1001 	}
1002 
1003 	DP_DEBUG(dev, QEDR_MSG_CQ,
1004 		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
1005 		 cq->icid, cq, params.cq_size);
1006 
1007 	return 0;
1008 
1009 err2:
1010 	destroy_iparams.icid = cq->icid;
1011 	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
1012 				  &destroy_oparams);
1013 err1:
1014 	if (udata) {
1015 		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1016 		ib_umem_release(cq->q.umem);
1017 		if (cq->q.db_mmap_entry)
1018 			rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1019 	} else {
1020 		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1021 	}
1022 err0:
1023 	return -EINVAL;
1024 }
1025 
1026 int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
1027 {
1028 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1029 	struct qedr_cq *cq = get_qedr_cq(ibcq);
1030 
1031 	DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
1032 
1033 	return 0;
1034 }
1035 
1036 #define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
1037 #define QEDR_DESTROY_CQ_ITER_DURATION		(10)
1038 
1039 void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1040 {
1041 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1042 	struct qed_rdma_destroy_cq_out_params oparams;
1043 	struct qed_rdma_destroy_cq_in_params iparams;
1044 	struct qedr_cq *cq = get_qedr_cq(ibcq);
1045 	int iter;
1046 
1047 	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
1048 
1049 	cq->destroyed = 1;
1050 
1051 	/* GSI CQs are handled by the driver, so they don't exist in the FW */
1052 	if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
1053 		qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1054 		return;
1055 	}
1056 
1057 	iparams.icid = cq->icid;
1058 	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1059 	dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1060 
1061 	if (udata) {
1062 		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1063 		ib_umem_release(cq->q.umem);
1064 
1065 		if (cq->q.db_rec_data) {
1066 			qedr_db_recovery_del(dev, cq->q.db_addr,
1067 					     &cq->q.db_rec_data->db_data);
1068 			rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1069 		}
1070 	} else {
1071 		qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1072 	}
1073 
1074 	/* We don't want the IRQ handler to handle a non-existing CQ so we
1075 	 * wait until all CNQ interrupts, if any, are received. This will always
1076 	 * happen and will always happen very fast. If not, then a serious error
1077 	 * has occurred. That is why we can use a long delay.
1078 	 * We spin for a short time so we don't lose time on context switching
1079 	 * in case all the completions are handled in that span. Otherwise
1080 	 * we sleep for a while and check again. Since the CNQ may be
1081 	 * associated with (only) the current CPU we use msleep to allow the
1082 	 * current CPU to be freed.
1083 	 * The CNQ notification is increased in qedr_irq_handler().
1084 	 */
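	/* Worst case: ~100us of busy-waiting (10 x 10us) followed by ~100ms of
	 * sleeping (10 x 10ms) before we give up waiting for the notifications.
	 */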
1085 	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1086 	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1087 		udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1088 		iter--;
1089 	}
1090 
1091 	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1092 	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1093 		msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1094 		iter--;
1095 	}
1096 
1097 	/* Note that we don't need to have explicit code to wait for the
1098 	 * completion of the event handler because it is invoked from the EQ.
1099 	 * Since the destroy CQ ramrod has also been received on the EQ we can
1100 	 * be certain that there's no event handler in process.
1101 	 */
1102 }
1103 
1104 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1105 					  struct ib_qp_attr *attr,
1106 					  int attr_mask,
1107 					  struct qed_rdma_modify_qp_in_params
1108 					  *qp_params)
1109 {
1110 	const struct ib_gid_attr *gid_attr;
1111 	enum rdma_network_type nw_type;
1112 	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1113 	u32 ipv4_addr;
1114 	int ret;
1115 	int i;
1116 
1117 	gid_attr = grh->sgid_attr;
1118 	ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
1119 	if (ret)
1120 		return ret;
1121 
1122 	nw_type = rdma_gid_attr_network_type(gid_attr);
1123 	switch (nw_type) {
1124 	case RDMA_NETWORK_IPV6:
1125 		memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1126 		       sizeof(qp_params->sgid));
1127 		memcpy(&qp_params->dgid.bytes[0],
1128 		       &grh->dgid,
1129 		       sizeof(qp_params->dgid));
1130 		qp_params->roce_mode = ROCE_V2_IPV6;
1131 		SET_FIELD(qp_params->modify_flags,
1132 			  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1133 		break;
1134 	case RDMA_NETWORK_IB:
1135 		memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1136 		       sizeof(qp_params->sgid));
1137 		memcpy(&qp_params->dgid.bytes[0],
1138 		       &grh->dgid,
1139 		       sizeof(qp_params->dgid));
1140 		qp_params->roce_mode = ROCE_V1;
1141 		break;
1142 	case RDMA_NETWORK_IPV4:
1143 		memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1144 		memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1145 		ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
1146 		qp_params->sgid.ipv4_addr = ipv4_addr;
1147 		ipv4_addr =
1148 		    qedr_get_ipv4_from_gid(grh->dgid.raw);
1149 		qp_params->dgid.ipv4_addr = ipv4_addr;
1150 		SET_FIELD(qp_params->modify_flags,
1151 			  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1152 		qp_params->roce_mode = ROCE_V2_IPV4;
1153 		break;
1154 	}
1155 
1156 	for (i = 0; i < 4; i++) {
1157 		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1158 		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1159 	}
1160 
1161 	if (qp_params->vlan_id >= VLAN_CFI_MASK)
1162 		qp_params->vlan_id = 0;
1163 
1164 	return 0;
1165 }
1166 
1167 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1168 			       struct ib_qp_init_attr *attrs,
1169 			       struct ib_udata *udata)
1170 {
1171 	struct qedr_device_attr *qattr = &dev->attr;
1172 
1173 	/* QP0... attrs->qp_type == IB_QPT_GSI */
1174 	if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1175 		DP_DEBUG(dev, QEDR_MSG_QP,
1176 			 "create qp: unsupported qp type=0x%x requested\n",
1177 			 attrs->qp_type);
1178 		return -EINVAL;
1179 	}
1180 
1181 	if (attrs->cap.max_send_wr > qattr->max_sqe) {
1182 		DP_ERR(dev,
1183 		       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1184 		       attrs->cap.max_send_wr, qattr->max_sqe);
1185 		return -EINVAL;
1186 	}
1187 
1188 	if (attrs->cap.max_inline_data > qattr->max_inline) {
1189 		DP_ERR(dev,
1190 		       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1191 		       attrs->cap.max_inline_data, qattr->max_inline);
1192 		return -EINVAL;
1193 	}
1194 
1195 	if (attrs->cap.max_send_sge > qattr->max_sge) {
1196 		DP_ERR(dev,
1197 		       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1198 		       attrs->cap.max_send_sge, qattr->max_sge);
1199 		return -EINVAL;
1200 	}
1201 
1202 	if (attrs->cap.max_recv_sge > qattr->max_sge) {
1203 		DP_ERR(dev,
1204 		       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1205 		       attrs->cap.max_recv_sge, qattr->max_sge);
1206 		return -EINVAL;
1207 	}
1208 
1209 	/* Unprivileged user space cannot create special QP */
1210 	if (udata && attrs->qp_type == IB_QPT_GSI) {
1211 		DP_ERR(dev,
1212 		       "create qp: userspace can't create special QPs of type=0x%x\n",
1213 		       attrs->qp_type);
1214 		return -EINVAL;
1215 	}
1216 
1217 	return 0;
1218 }
1219 
1220 static int qedr_copy_srq_uresp(struct qedr_dev *dev,
1221 			       struct qedr_srq *srq, struct ib_udata *udata)
1222 {
1223 	struct qedr_create_srq_uresp uresp = {};
1224 	int rc;
1225 
1226 	uresp.srq_id = srq->srq_id;
1227 
1228 	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1229 	if (rc)
1230 		DP_ERR(dev, "create srq: problem copying data to user space\n");
1231 
1232 	return rc;
1233 }
1234 
1235 static void qedr_copy_rq_uresp(struct qedr_dev *dev,
1236 			      struct qedr_create_qp_uresp *uresp,
1237 			      struct qedr_qp *qp)
1238 {
1239 	/* iWARP requires two doorbells per RQ. */
1240 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1241 		uresp->rq_db_offset =
1242 		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1243 		uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1244 	} else {
1245 		uresp->rq_db_offset =
1246 		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1247 	}
1248 
1249 	uresp->rq_icid = qp->icid;
1250 	if (qp->urq.db_mmap_entry)
1251 		uresp->rq_db_rec_addr =
1252 			rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
1253 }
1254 
1255 static void qedr_copy_sq_uresp(struct qedr_dev *dev,
1256 			       struct qedr_create_qp_uresp *uresp,
1257 			       struct qedr_qp *qp)
1258 {
1259 	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1260 
1261 	/* iWARP uses the same cid for rq and sq */
1262 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
1263 		uresp->sq_icid = qp->icid;
1264 	else
1265 		uresp->sq_icid = qp->icid + 1;
1266 
1267 	if (qp->usq.db_mmap_entry)
1268 		uresp->sq_db_rec_addr =
1269 			rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
1270 }
1271 
1272 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1273 			      struct qedr_qp *qp, struct ib_udata *udata,
1274 			      struct qedr_create_qp_uresp *uresp)
1275 {
1276 	int rc;
1277 
1278 	memset(uresp, 0, sizeof(*uresp));
1279 	qedr_copy_sq_uresp(dev, uresp, qp);
1280 	qedr_copy_rq_uresp(dev, uresp, qp);
1281 
1282 	uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1283 	uresp->qp_id = qp->qp_id;
1284 
1285 	rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
1286 	if (rc)
1287 		DP_ERR(dev,
1288 		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
1289 		       qp->icid);
1290 
1291 	return rc;
1292 }
1293 
1294 static void qedr_set_common_qp_params(struct qedr_dev *dev,
1295 				      struct qedr_qp *qp,
1296 				      struct qedr_pd *pd,
1297 				      struct ib_qp_init_attr *attrs)
1298 {
1299 	spin_lock_init(&qp->q_lock);
1300 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1301 		kref_init(&qp->refcnt);
1302 		init_completion(&qp->iwarp_cm_comp);
1303 	}
1304 	qp->pd = pd;
1305 	qp->qp_type = attrs->qp_type;
1306 	qp->max_inline_data = attrs->cap.max_inline_data;
1307 	qp->sq.max_sges = attrs->cap.max_send_sge;
1308 	qp->state = QED_ROCE_QP_STATE_RESET;
1309 	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1310 	qp->sq_cq = get_qedr_cq(attrs->send_cq);
1311 	qp->dev = dev;
1312 
1313 	if (attrs->srq) {
1314 		qp->srq = get_qedr_srq(attrs->srq);
1315 	} else {
1316 		qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1317 		qp->rq.max_sges = attrs->cap.max_recv_sge;
1318 		DP_DEBUG(dev, QEDR_MSG_QP,
1319 			 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1320 			 qp->rq.max_sges, qp->rq_cq->icid);
1321 	}
1322 
1323 	DP_DEBUG(dev, QEDR_MSG_QP,
1324 		 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1325 		 pd->pd_id, qp->qp_type, qp->max_inline_data,
1326 		 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1327 	DP_DEBUG(dev, QEDR_MSG_QP,
1328 		 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1329 		 qp->sq.max_sges, qp->sq_cq->icid);
1330 }
1331 
1332 static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1333 {
1334 	int rc;
1335 
1336 	qp->sq.db = dev->db_addr +
1337 		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1338 	qp->sq.db_data.data.icid = qp->icid + 1;
1339 	rc = qedr_db_recovery_add(dev, qp->sq.db,
1340 				  &qp->sq.db_data,
1341 				  DB_REC_WIDTH_32B,
1342 				  DB_REC_KERNEL);
1343 	if (rc)
1344 		return rc;
1345 
1346 	if (!qp->srq) {
1347 		qp->rq.db = dev->db_addr +
1348 			    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1349 		qp->rq.db_data.data.icid = qp->icid;
1350 
1351 		rc = qedr_db_recovery_add(dev, qp->rq.db,
1352 					  &qp->rq.db_data,
1353 					  DB_REC_WIDTH_32B,
1354 					  DB_REC_KERNEL);
1355 		if (rc)
1356 			qedr_db_recovery_del(dev, qp->sq.db,
1357 					     &qp->sq.db_data);
1358 	}
1359 
1360 	return rc;
1361 }
1362 
1363 static int qedr_check_srq_params(struct qedr_dev *dev,
1364 				 struct ib_srq_init_attr *attrs,
1365 				 struct ib_udata *udata)
1366 {
1367 	struct qedr_device_attr *qattr = &dev->attr;
1368 
1369 	if (attrs->attr.max_wr > qattr->max_srq_wr) {
1370 		DP_ERR(dev,
1371 		       "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
1372 		       attrs->attr.max_wr, qattr->max_srq_wr);
1373 		return -EINVAL;
1374 	}
1375 
1376 	if (attrs->attr.max_sge > qattr->max_sge) {
1377 		DP_ERR(dev,
1378 		       "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
1379 		       attrs->attr.max_sge, qattr->max_sge);
1380 		return -EINVAL;
1381 	}
1382 
1383 	return 0;
1384 }
1385 
1386 static void qedr_free_srq_user_params(struct qedr_srq *srq)
1387 {
1388 	qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1389 	ib_umem_release(srq->usrq.umem);
1390 	ib_umem_release(srq->prod_umem);
1391 }
1392 
1393 static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
1394 {
1395 	struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1396 	struct qedr_dev *dev = srq->dev;
1397 
1398 	dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
1399 
1400 	dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1401 			  hw_srq->virt_prod_pair_addr,
1402 			  hw_srq->phy_prod_pair_addr);
1403 }
1404 
1405 static int qedr_init_srq_user_params(struct ib_udata *udata,
1406 				     struct qedr_srq *srq,
1407 				     struct qedr_create_srq_ureq *ureq,
1408 				     int access)
1409 {
1410 	struct scatterlist *sg;
1411 	int rc;
1412 
1413 	rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
1414 				  ureq->srq_len, false, access, 1);
1415 	if (rc)
1416 		return rc;
1417 
1418 	srq->prod_umem =
1419 		ib_umem_get(udata, ureq->prod_pair_addr,
1420 			    sizeof(struct rdma_srq_producers), access);
1421 	if (IS_ERR(srq->prod_umem)) {
1422 		qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1423 		ib_umem_release(srq->usrq.umem);
1424 		DP_ERR(srq->dev,
1425 		       "create srq: failed ib_umem_get for producer, got %ld\n",
1426 		       PTR_ERR(srq->prod_umem));
1427 		return PTR_ERR(srq->prod_umem);
1428 	}
1429 
1430 	sg = srq->prod_umem->sg_head.sgl;
1431 	srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
1432 
1433 	return 0;
1434 }
1435 
1436 static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
1437 					struct qedr_dev *dev,
1438 					struct ib_srq_init_attr *init_attr)
1439 {
1440 	struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1441 	dma_addr_t phy_prod_pair_addr;
1442 	u32 num_elems;
1443 	void *va;
1444 	int rc;
1445 
1446 	va = dma_alloc_coherent(&dev->pdev->dev,
1447 				sizeof(struct rdma_srq_producers),
1448 				&phy_prod_pair_addr, GFP_KERNEL);
1449 	if (!va) {
1450 		DP_ERR(dev,
1451 		       "create srq: failed to allocate dma memory for producer\n");
1452 		return -ENOMEM;
1453 	}
1454 
1455 	hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
1456 	hw_srq->virt_prod_pair_addr = va;
1457 
1458 	num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
1459 	rc = dev->ops->common->chain_alloc(dev->cdev,
1460 					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1461 					   QED_CHAIN_MODE_PBL,
1462 					   QED_CHAIN_CNT_TYPE_U32,
1463 					   num_elems,
1464 					   QEDR_SRQ_WQE_ELEM_SIZE,
1465 					   &hw_srq->pbl, NULL);
1466 	if (rc)
1467 		goto err0;
1468 
1469 	hw_srq->num_elems = num_elems;
1470 
1471 	return 0;
1472 
1473 err0:
1474 	dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1475 			  va, phy_prod_pair_addr);
1476 	return rc;
1477 }
1478 
1479 int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
1480 		    struct ib_udata *udata)
1481 {
1482 	struct qed_rdma_destroy_srq_in_params destroy_in_params;
1483 	struct qed_rdma_create_srq_in_params in_params = {};
1484 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1485 	struct qed_rdma_create_srq_out_params out_params;
1486 	struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
1487 	struct qedr_create_srq_ureq ureq = {};
1488 	u64 pbl_base_addr, phy_prod_pair_addr;
1489 	struct qedr_srq_hwq_info *hw_srq;
1490 	u32 page_cnt, page_size;
1491 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
1492 	int rc = 0;
1493 
1494 	DP_DEBUG(dev, QEDR_MSG_QP,
1495 		 "create SRQ called from %s (pd %p)\n",
1496 		 (udata) ? "User lib" : "kernel", pd);
1497 
1498 	rc = qedr_check_srq_params(dev, init_attr, udata);
1499 	if (rc)
1500 		return -EINVAL;
1501 
1502 	srq->dev = dev;
1503 	hw_srq = &srq->hw_srq;
1504 	spin_lock_init(&srq->lock);
1505 
1506 	hw_srq->max_wr = init_attr->attr.max_wr;
1507 	hw_srq->max_sges = init_attr->attr.max_sge;
1508 
1509 	if (udata) {
1510 		if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1511 							 udata->inlen))) {
1512 			DP_ERR(dev,
1513 			       "create srq: problem copying data from user space\n");
1514 			goto err0;
1515 		}
1516 
1517 		rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
1518 		if (rc)
1519 			goto err0;
1520 
1521 		page_cnt = srq->usrq.pbl_info.num_pbes;
1522 		pbl_base_addr = srq->usrq.pbl_tbl->pa;
1523 		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1524 		page_size = PAGE_SIZE;
1525 	} else {
1526 		struct qed_chain *pbl;
1527 
1528 		rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
1529 		if (rc)
1530 			goto err0;
1531 
1532 		pbl = &hw_srq->pbl;
1533 		page_cnt = qed_chain_get_page_cnt(pbl);
1534 		pbl_base_addr = qed_chain_get_pbl_phys(pbl);
1535 		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1536 		page_size = QED_CHAIN_PAGE_SIZE;
1537 	}
1538 
1539 	in_params.pd_id = pd->pd_id;
1540 	in_params.pbl_base_addr = pbl_base_addr;
1541 	in_params.prod_pair_addr = phy_prod_pair_addr;
1542 	in_params.num_pages = page_cnt;
1543 	in_params.page_size = page_size;
1544 
1545 	rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
1546 	if (rc)
1547 		goto err1;
1548 
1549 	srq->srq_id = out_params.srq_id;
1550 
1551 	if (udata) {
1552 		rc = qedr_copy_srq_uresp(dev, srq, udata);
1553 		if (rc)
1554 			goto err2;
1555 	}
1556 
1557 	rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
1558 	if (rc)
1559 		goto err2;
1560 
1561 	DP_DEBUG(dev, QEDR_MSG_SRQ,
1562 		 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
1563 	return 0;
1564 
1565 err2:
1566 	destroy_in_params.srq_id = srq->srq_id;
1567 
1568 	dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
1569 err1:
1570 	if (udata)
1571 		qedr_free_srq_user_params(srq);
1572 	else
1573 		qedr_free_srq_kernel_params(srq);
1574 err0:
1575 	return -EFAULT;
1576 }
1577 
1578 void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1579 {
1580 	struct qed_rdma_destroy_srq_in_params in_params = {};
1581 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1582 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
1583 
1584 	xa_erase_irq(&dev->srqs, srq->srq_id);
1585 	in_params.srq_id = srq->srq_id;
1586 	dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
1587 
1588 	if (ibsrq->uobject)
1589 		qedr_free_srq_user_params(srq);
1590 	else
1591 		qedr_free_srq_kernel_params(srq);
1592 
1593 	DP_DEBUG(dev, QEDR_MSG_SRQ,
1594 		 "destroy srq: destroyed srq with srq_id=0x%0x\n",
1595 		 srq->srq_id);
1596 }
1597 
1598 int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1599 		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
1600 {
1601 	struct qed_rdma_modify_srq_in_params in_params = {};
1602 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1603 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
1604 	int rc;
1605 
1606 	if (attr_mask & IB_SRQ_MAX_WR) {
1607 		DP_ERR(dev,
1608 		       "modify srq: invalid attribute mask=0x%x specified for %p\n",
1609 		       attr_mask, srq);
1610 		return -EINVAL;
1611 	}
1612 
1613 	if (attr_mask & IB_SRQ_LIMIT) {
1614 		if (attr->srq_limit >= srq->hw_srq.max_wr) {
1615 			DP_ERR(dev,
1616 			       "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
1617 			       attr->srq_limit, srq->hw_srq.max_wr);
1618 			return -EINVAL;
1619 		}
1620 
1621 		in_params.srq_id = srq->srq_id;
1622 		in_params.wqe_limit = attr->srq_limit;
1623 		rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
1624 		if (rc)
1625 			return rc;
1626 	}
1627 
1628 	srq->srq_limit = attr->srq_limit;
1629 
1630 	DP_DEBUG(dev, QEDR_MSG_SRQ,
1631 		 "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
1632 
1633 	return 0;
1634 }
1635 
1636 static inline void
1637 qedr_init_common_qp_in_params(struct qedr_dev *dev,
1638 			      struct qedr_pd *pd,
1639 			      struct qedr_qp *qp,
1640 			      struct ib_qp_init_attr *attrs,
1641 			      bool fmr_and_reserved_lkey,
1642 			      struct qed_rdma_create_qp_in_params *params)
1643 {
1644 	/* QP handle to be written in an async event */
1645 	params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1646 	params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
1647 
1648 	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1649 	params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1650 	params->pd = pd->pd_id;
1651 	params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1652 	params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1653 	params->stats_queue = 0;
1654 	params->srq_id = 0;
1655 	params->use_srq = false;
1656 
1657 	if (!qp->srq) {
1658 		params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1659 
1660 	} else {
1661 		params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1662 		params->srq_id = qp->srq->srq_id;
1663 		params->use_srq = true;
1664 	}
1665 }
1666 
1667 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1668 {
1669 	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1670 		 "qp=%p. "
1671 		 "sq_addr=0x%llx, "
1672 		 "sq_len=%zd, "
1673 		 "rq_addr=0x%llx, "
1674 		 "rq_len=%zd"
1675 		 "\n",
1676 		 qp,
1677 		 qp->usq.buf_addr,
1678 		 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
1679 }
1680 
1681 static inline void
1682 qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1683 			    struct qedr_qp *qp,
1684 			    struct qed_rdma_create_qp_out_params *out_params)
1685 {
1686 	qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1687 	qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1688 
1689 	qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1690 			   &qp->usq.pbl_info, FW_PAGE_SHIFT);
1691 	if (!qp->srq) {
1692 		qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1693 		qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1694 	}
1695 
1696 	qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1697 			   &qp->urq.pbl_info, FW_PAGE_SHIFT);
1698 }
1699 
1700 static void qedr_cleanup_user(struct qedr_dev *dev,
1701 			      struct qedr_ucontext *ctx,
1702 			      struct qedr_qp *qp)
1703 {
1704 	ib_umem_release(qp->usq.umem);
1705 	qp->usq.umem = NULL;
1706 
1707 	ib_umem_release(qp->urq.umem);
1708 	qp->urq.umem = NULL;
1709 
1710 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
1711 		qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1712 		qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1713 	} else {
1714 		kfree(qp->usq.pbl_tbl);
1715 		kfree(qp->urq.pbl_tbl);
1716 	}
1717 
1718 	if (qp->usq.db_rec_data) {
1719 		qedr_db_recovery_del(dev, qp->usq.db_addr,
1720 				     &qp->usq.db_rec_data->db_data);
1721 		rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
1722 	}
1723 
1724 	if (qp->urq.db_rec_data) {
1725 		qedr_db_recovery_del(dev, qp->urq.db_addr,
1726 				     &qp->urq.db_rec_data->db_data);
1727 		rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
1728 	}
1729 
1730 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
1731 		qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
1732 				     &qp->urq.db_rec_db2_data);
1733 }
1734 
1735 static int qedr_create_user_qp(struct qedr_dev *dev,
1736 			       struct qedr_qp *qp,
1737 			       struct ib_pd *ibpd,
1738 			       struct ib_udata *udata,
1739 			       struct ib_qp_init_attr *attrs)
1740 {
1741 	struct qed_rdma_create_qp_in_params in_params;
1742 	struct qed_rdma_create_qp_out_params out_params;
1743 	struct qedr_pd *pd = get_qedr_pd(ibpd);
1744 	struct qedr_create_qp_uresp uresp;
1745 	struct qedr_ucontext *ctx = NULL;
1746 	struct qedr_create_qp_ureq ureq;
1747 	int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
1748 	int rc = -EINVAL;
1749 
1750 	qp->create_type = QEDR_QP_CREATE_USER;
1751 	memset(&ureq, 0, sizeof(ureq));
1752 	rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen));
1753 	if (rc) {
1754 		DP_ERR(dev, "Problem copying data from user space\n");
1755 		return rc;
1756 	}
1757 
1758 	/* SQ - read access only (0) */
1759 	rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
1760 				  ureq.sq_len, true, 0, alloc_and_init);
1761 	if (rc)
1762 		return rc;
1763 
1764 	if (!qp->srq) {
1765 		/* RQ - read access only (0) */
1766 		rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
1767 					  ureq.rq_len, true, 0, alloc_and_init);
1768 		if (rc)
1769 			return rc;
1770 	}
1771 
1772 	memset(&in_params, 0, sizeof(in_params));
1773 	qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1774 	in_params.qp_handle_lo = ureq.qp_handle_lo;
1775 	in_params.qp_handle_hi = ureq.qp_handle_hi;
1776 	in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1777 	in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1778 	if (!qp->srq) {
1779 		in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1780 		in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1781 	}
1782 
1783 	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1784 					      &in_params, &out_params);
1785 
1786 	if (!qp->qed_qp) {
1787 		rc = -ENOMEM;
1788 		goto err1;
1789 	}
1790 
1791 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
1792 		qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1793 
1794 	qp->qp_id = out_params.qp_id;
1795 	qp->icid = out_params.icid;
1796 
1797 	rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
1798 	if (rc)
1799 		goto err;
1800 
1801 	/* The db offset was calculated in qedr_copy_qp_uresp(); now set it in the user queues */
1802 	ctx = pd->uctx;
1803 	qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
1804 	qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
1805 
1806 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1807 		qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
1808 
1809 		/* Calculate the db_rec_db2 data here since it is constant, so
1810 		 * there is no need to reflect it from user space.
1811 		 */
1812 		qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
1813 		qp->urq.db_rec_db2_data.data.value =
1814 			cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
1815 	}
1816 
1817 	rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
1818 				  &qp->usq.db_rec_data->db_data,
1819 				  DB_REC_WIDTH_32B,
1820 				  DB_REC_USER);
1821 	if (rc)
1822 		goto err;
1823 
1824 	rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
1825 				  &qp->urq.db_rec_data->db_data,
1826 				  DB_REC_WIDTH_32B,
1827 				  DB_REC_USER);
1828 	if (rc)
1829 		goto err;
1830 
1831 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1832 		rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
1833 					  &qp->urq.db_rec_db2_data,
1834 					  DB_REC_WIDTH_32B,
1835 					  DB_REC_USER);
1836 		if (rc)
1837 			goto err;
1838 	}
1839 	qedr_qp_user_print(dev, qp);
1840 
1841 	return rc;
1842 err:
1843 	rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1844 	if (rc)
1845 		DP_ERR(dev, "create qp: fatal fault. rc=%d\n", rc);
1846 
1847 err1:
1848 	qedr_cleanup_user(dev, ctx, qp);
1849 	return rc;
1850 }
1851 
1852 static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1853 {
1854 	int rc;
1855 
1856 	qp->sq.db = dev->db_addr +
1857 	    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1858 	qp->sq.db_data.data.icid = qp->icid;
1859 
1860 	rc = qedr_db_recovery_add(dev, qp->sq.db,
1861 				  &qp->sq.db_data,
1862 				  DB_REC_WIDTH_32B,
1863 				  DB_REC_KERNEL);
1864 	if (rc)
1865 		return rc;
1866 
1867 	qp->rq.db = dev->db_addr +
1868 		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1869 	qp->rq.db_data.data.icid = qp->icid;
1870 	qp->rq.iwarp_db2 = dev->db_addr +
1871 			   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1872 	qp->rq.iwarp_db2_data.data.icid = qp->icid;
1873 	qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
1874 
1875 	rc = qedr_db_recovery_add(dev, qp->rq.db,
1876 				  &qp->rq.db_data,
1877 				  DB_REC_WIDTH_32B,
1878 				  DB_REC_KERNEL);
1879 	if (rc)
1880 		return rc;
1881 
1882 	rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
1883 				  &qp->rq.iwarp_db2_data,
1884 				  DB_REC_WIDTH_32B,
1885 				  DB_REC_KERNEL);
1886 	return rc;
1887 }
1888 
1889 static int
1890 qedr_roce_create_kernel_qp(struct qedr_dev *dev,
1891 			   struct qedr_qp *qp,
1892 			   struct qed_rdma_create_qp_in_params *in_params,
1893 			   u32 n_sq_elems, u32 n_rq_elems)
1894 {
1895 	struct qed_rdma_create_qp_out_params out_params;
1896 	int rc;
1897 
1898 	rc = dev->ops->common->chain_alloc(dev->cdev,
1899 					   QED_CHAIN_USE_TO_PRODUCE,
1900 					   QED_CHAIN_MODE_PBL,
1901 					   QED_CHAIN_CNT_TYPE_U32,
1902 					   n_sq_elems,
1903 					   QEDR_SQE_ELEMENT_SIZE,
1904 					   &qp->sq.pbl, NULL);
1905 
1906 	if (rc)
1907 		return rc;
1908 
1909 	in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1910 	in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
1911 
1912 	rc = dev->ops->common->chain_alloc(dev->cdev,
1913 					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1914 					   QED_CHAIN_MODE_PBL,
1915 					   QED_CHAIN_CNT_TYPE_U32,
1916 					   n_rq_elems,
1917 					   QEDR_RQE_ELEMENT_SIZE,
1918 					   &qp->rq.pbl, NULL);
1919 	if (rc)
1920 		return rc;
1921 
1922 	in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1923 	in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
1924 
1925 	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1926 					      in_params, &out_params);
1927 
1928 	if (!qp->qed_qp)
1929 		return -EINVAL;
1930 
1931 	qp->qp_id = out_params.qp_id;
1932 	qp->icid = out_params.icid;
1933 
1934 	return qedr_set_roce_db_info(dev, qp);
1935 }
1936 
1937 static int
1938 qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
1939 			    struct qedr_qp *qp,
1940 			    struct qed_rdma_create_qp_in_params *in_params,
1941 			    u32 n_sq_elems, u32 n_rq_elems)
1942 {
1943 	struct qed_rdma_create_qp_out_params out_params;
1944 	struct qed_chain_ext_pbl ext_pbl;
1945 	int rc;
1946 
1947 	in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
1948 						     QEDR_SQE_ELEMENT_SIZE,
1949 						     QED_CHAIN_MODE_PBL);
1950 	in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
1951 						     QEDR_RQE_ELEMENT_SIZE,
1952 						     QED_CHAIN_MODE_PBL);
1953 
1954 	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1955 					      in_params, &out_params);
1956 
1957 	if (!qp->qed_qp)
1958 		return -EINVAL;
1959 
1960 	/* Now we allocate the chain */
1961 	ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
1962 	ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
1963 
1964 	rc = dev->ops->common->chain_alloc(dev->cdev,
1965 					   QED_CHAIN_USE_TO_PRODUCE,
1966 					   QED_CHAIN_MODE_PBL,
1967 					   QED_CHAIN_CNT_TYPE_U32,
1968 					   n_sq_elems,
1969 					   QEDR_SQE_ELEMENT_SIZE,
1970 					   &qp->sq.pbl, &ext_pbl);
1971 
1972 	if (rc)
1973 		goto err;
1974 
1975 	ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
1976 	ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
1977 
1978 	rc = dev->ops->common->chain_alloc(dev->cdev,
1979 					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1980 					   QED_CHAIN_MODE_PBL,
1981 					   QED_CHAIN_CNT_TYPE_U32,
1982 					   n_rq_elems,
1983 					   QEDR_RQE_ELEMENT_SIZE,
1984 					   &qp->rq.pbl, &ext_pbl);
1985 
1986 	if (rc)
1987 		goto err;
1988 
1989 	qp->qp_id = out_params.qp_id;
1990 	qp->icid = out_params.icid;
1991 
1992 	return qedr_set_iwarp_db_info(dev, qp);
1993 
1994 err:
1995 	dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1996 
1997 	return rc;
1998 }
1999 
2000 static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
2001 {
2002 	dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
2003 	kfree(qp->wqe_wr_id);
2004 
2005 	dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
2006 	kfree(qp->rqe_wr_id);
2007 
2008 	/* The GSI QP is not registered with the db recovery mechanism, so nothing to delete */
2009 	if (qp->qp_type == IB_QPT_GSI)
2010 		return;
2011 
2012 	qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
2013 
2014 	if (!qp->srq) {
2015 		qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
2016 
2017 		if (rdma_protocol_iwarp(&dev->ibdev, 1))
2018 			qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
2019 					     &qp->rq.iwarp_db2_data);
2020 	}
2021 }
2022 
2023 static int qedr_create_kernel_qp(struct qedr_dev *dev,
2024 				 struct qedr_qp *qp,
2025 				 struct ib_pd *ibpd,
2026 				 struct ib_qp_init_attr *attrs)
2027 {
2028 	struct qed_rdma_create_qp_in_params in_params;
2029 	struct qedr_pd *pd = get_qedr_pd(ibpd);
2030 	int rc = -EINVAL;
2031 	u32 n_rq_elems;
2032 	u32 n_sq_elems;
2033 	u32 n_sq_entries;
2034 
2035 	memset(&in_params, 0, sizeof(in_params));
2036 	qp->create_type = QEDR_QP_CREATE_KERNEL;
2037 
2038 	/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
2039 	 * the ring. The ring should allow at least a single WR, even if the
2040 	 * user requested none, due to allocation issues.
2041 	 * We should add an extra WR since the prod and cons indices of
2042 	 * wqe_wr_id are managed in such a way that the WQ is considered full
2043 	 * when (prod+1)%max_wr==cons. We currently don't do that because we
2044 	 * double the number of entries due to an iSER issue that pushes far
2045 	 * more WRs than indicated. If we declined its ib_post_send() we would
2046 	 * get error prints in dmesg that we'd like to avoid.
2047 	 */
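	/* For illustration (numbers chosen arbitrarily, not taken from HW
	 * limits): with the (prod + 1) % max_wr == cons convention used by
	 * qedr_wq_is_full(), a ring sized for max_wr = 4 can hold at most
	 * three outstanding WRs: after the third post prod = 3 and
	 * (3 + 1) % 4 == cons == 0, so the WQ reports full.
	 */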
2048 	qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
2049 			      dev->attr.max_sqe);
2050 
2051 	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
2052 				GFP_KERNEL);
2053 	if (!qp->wqe_wr_id) {
2054 		DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
2055 		return -ENOMEM;
2056 	}
2057 
2058 	/* QP handle to be written in CQE */
2059 	in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
2060 	in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
2061 
2062 	/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
2063 	 * the ring. The ring should allow at least a single WR, even if the
2064 	 * user requested none, due to allocation issues.
2065 	 */
2066 	qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
2067 
2068 	/* Allocate driver internal RQ array */
2069 	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
2070 				GFP_KERNEL);
2071 	if (!qp->rqe_wr_id) {
2072 		DP_ERR(dev,
2073 		       "create qp: failed RQ shadow memory allocation\n");
2074 		kfree(qp->wqe_wr_id);
2075 		return -ENOMEM;
2076 	}
2077 
2078 	qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
2079 
2080 	n_sq_entries = attrs->cap.max_send_wr;
2081 	n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
2082 	n_sq_entries = max_t(u32, n_sq_entries, 1);
2083 	n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2084 
2085 	n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
2086 
2087 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
2088 		rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
2089 						 n_sq_elems, n_rq_elems);
2090 	else
2091 		rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
2092 						n_sq_elems, n_rq_elems);
2093 	if (rc)
2094 		qedr_cleanup_kernel(dev, qp);
2095 
2096 	return rc;
2097 }
2098 
2099 struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
2100 			     struct ib_qp_init_attr *attrs,
2101 			     struct ib_udata *udata)
2102 {
2103 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2104 	struct qedr_pd *pd = get_qedr_pd(ibpd);
2105 	struct qedr_qp *qp;
2106 	struct ib_qp *ibqp;
2107 	int rc = 0;
2108 
2109 	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
2110 		 udata ? "user library" : "kernel", pd);
2111 
2112 	rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
2113 	if (rc)
2114 		return ERR_PTR(rc);
2115 
2116 	DP_DEBUG(dev, QEDR_MSG_QP,
2117 		 "create qp: called from %s, event_handler=%p, pd=%p, sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
2118 		 udata ? "user library" : "kernel", attrs->event_handler, pd,
2119 		 get_qedr_cq(attrs->send_cq),
2120 		 get_qedr_cq(attrs->send_cq)->icid,
2121 		 get_qedr_cq(attrs->recv_cq),
2122 		 attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
2123 
2124 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2125 	if (!qp) {
2126 		DP_ERR(dev, "create qp: failed allocating memory\n");
2127 		return ERR_PTR(-ENOMEM);
2128 	}
2129 
2130 	qedr_set_common_qp_params(dev, qp, pd, attrs);
2131 
2132 	if (attrs->qp_type == IB_QPT_GSI) {
2133 		ibqp = qedr_create_gsi_qp(dev, attrs, qp);
2134 		if (IS_ERR(ibqp))
2135 			kfree(qp);
2136 		return ibqp;
2137 	}
2138 
2139 	if (udata)
2140 		rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
2141 	else
2142 		rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
2143 
2144 	if (rc)
2145 		goto err;
2146 
2147 	qp->ibqp.qp_num = qp->qp_id;
2148 
2149 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2150 		rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
2151 		if (rc)
2152 			goto err;
2153 	}
2154 
2155 	return &qp->ibqp;
2156 
2157 err:
2158 	kfree(qp);
2159 
2160 	return ERR_PTR(-EFAULT);
2161 }
2162 
2163 static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
2164 {
2165 	switch (qp_state) {
2166 	case QED_ROCE_QP_STATE_RESET:
2167 		return IB_QPS_RESET;
2168 	case QED_ROCE_QP_STATE_INIT:
2169 		return IB_QPS_INIT;
2170 	case QED_ROCE_QP_STATE_RTR:
2171 		return IB_QPS_RTR;
2172 	case QED_ROCE_QP_STATE_RTS:
2173 		return IB_QPS_RTS;
2174 	case QED_ROCE_QP_STATE_SQD:
2175 		return IB_QPS_SQD;
2176 	case QED_ROCE_QP_STATE_ERR:
2177 		return IB_QPS_ERR;
2178 	case QED_ROCE_QP_STATE_SQE:
2179 		return IB_QPS_SQE;
2180 	}
2181 	return IB_QPS_ERR;
2182 }
2183 
2184 static enum qed_roce_qp_state qedr_get_state_from_ibqp(
2185 					enum ib_qp_state qp_state)
2186 {
2187 	switch (qp_state) {
2188 	case IB_QPS_RESET:
2189 		return QED_ROCE_QP_STATE_RESET;
2190 	case IB_QPS_INIT:
2191 		return QED_ROCE_QP_STATE_INIT;
2192 	case IB_QPS_RTR:
2193 		return QED_ROCE_QP_STATE_RTR;
2194 	case IB_QPS_RTS:
2195 		return QED_ROCE_QP_STATE_RTS;
2196 	case IB_QPS_SQD:
2197 		return QED_ROCE_QP_STATE_SQD;
2198 	case IB_QPS_ERR:
2199 		return QED_ROCE_QP_STATE_ERR;
2200 	default:
2201 		return QED_ROCE_QP_STATE_ERR;
2202 	}
2203 }
2204 
2205 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
2206 {
2207 	qed_chain_reset(&qph->pbl);
2208 	qph->prod = 0;
2209 	qph->cons = 0;
2210 	qph->wqe_cons = 0;
2211 	qph->db_data.data.value = cpu_to_le16(0);
2212 }
2213 
2214 static int qedr_update_qp_state(struct qedr_dev *dev,
2215 				struct qedr_qp *qp,
2216 				enum qed_roce_qp_state cur_state,
2217 				enum qed_roce_qp_state new_state)
2218 {
2219 	int status = 0;
2220 
2221 	if (new_state == cur_state)
2222 		return 0;
2223 
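	/* Summary of the transitions accepted below; anything else
	 * returns -EINVAL:
	 *   RESET -> INIT
	 *   INIT  -> RTR, ERR
	 *   RTR   -> RTS, ERR
	 *   RTS   -> SQD, ERR
	 *   SQD   -> RTS, ERR
	 *   ERR   -> RESET (only if both SQ and RQ are empty)
	 */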
2224 	switch (cur_state) {
2225 	case QED_ROCE_QP_STATE_RESET:
2226 		switch (new_state) {
2227 		case QED_ROCE_QP_STATE_INIT:
2228 			qp->prev_wqe_size = 0;
2229 			qedr_reset_qp_hwq_info(&qp->sq);
2230 			qedr_reset_qp_hwq_info(&qp->rq);
2231 			break;
2232 		default:
2233 			status = -EINVAL;
2234 			break;
2235 		}
2236 		break;
2237 	case QED_ROCE_QP_STATE_INIT:
2238 		switch (new_state) {
2239 		case QED_ROCE_QP_STATE_RTR:
2240 			/* Update doorbell (in case post_recv was
2241 			 * done before move to RTR)
2242 			 */
2243 
2244 			if (rdma_protocol_roce(&dev->ibdev, 1)) {
2245 				writel(qp->rq.db_data.raw, qp->rq.db);
2246 			}
2247 			break;
2248 		case QED_ROCE_QP_STATE_ERR:
2249 			break;
2250 		default:
2251 			/* Invalid state change. */
2252 			status = -EINVAL;
2253 			break;
2254 		}
2255 		break;
2256 	case QED_ROCE_QP_STATE_RTR:
2257 		/* RTR->XXX */
2258 		switch (new_state) {
2259 		case QED_ROCE_QP_STATE_RTS:
2260 			break;
2261 		case QED_ROCE_QP_STATE_ERR:
2262 			break;
2263 		default:
2264 			/* Invalid state change. */
2265 			status = -EINVAL;
2266 			break;
2267 		}
2268 		break;
2269 	case QED_ROCE_QP_STATE_RTS:
2270 		/* RTS->XXX */
2271 		switch (new_state) {
2272 		case QED_ROCE_QP_STATE_SQD:
2273 			break;
2274 		case QED_ROCE_QP_STATE_ERR:
2275 			break;
2276 		default:
2277 			/* Invalid state change. */
2278 			status = -EINVAL;
2279 			break;
2280 		}
2281 		break;
2282 	case QED_ROCE_QP_STATE_SQD:
2283 		/* SQD->XXX */
2284 		switch (new_state) {
2285 		case QED_ROCE_QP_STATE_RTS:
2286 		case QED_ROCE_QP_STATE_ERR:
2287 			break;
2288 		default:
2289 			/* Invalid state change. */
2290 			status = -EINVAL;
2291 			break;
2292 		}
2293 		break;
2294 	case QED_ROCE_QP_STATE_ERR:
2295 		/* ERR->XXX */
2296 		switch (new_state) {
2297 		case QED_ROCE_QP_STATE_RESET:
2298 			if ((qp->rq.prod != qp->rq.cons) ||
2299 			    (qp->sq.prod != qp->sq.cons)) {
2300 				DP_NOTICE(dev,
2301 					  "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
2302 					  qp->rq.prod, qp->rq.cons, qp->sq.prod,
2303 					  qp->sq.cons);
2304 				status = -EINVAL;
2305 			}
2306 			break;
2307 		default:
2308 			status = -EINVAL;
2309 			break;
2310 		}
2311 		break;
2312 	default:
2313 		status = -EINVAL;
2314 		break;
2315 	}
2316 
2317 	return status;
2318 }
2319 
2320 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2321 		   int attr_mask, struct ib_udata *udata)
2322 {
2323 	struct qedr_qp *qp = get_qedr_qp(ibqp);
2324 	struct qed_rdma_modify_qp_in_params qp_params = { 0 };
2325 	struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
2326 	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2327 	enum ib_qp_state old_qp_state, new_qp_state;
2328 	enum qed_roce_qp_state cur_state;
2329 	int rc = 0;
2330 
2331 	DP_DEBUG(dev, QEDR_MSG_QP,
2332 		 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
2333 		 attr->qp_state);
2334 
2335 	old_qp_state = qedr_get_ibqp_state(qp->state);
2336 	if (attr_mask & IB_QP_STATE)
2337 		new_qp_state = attr->qp_state;
2338 	else
2339 		new_qp_state = old_qp_state;
2340 
2341 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
2342 		if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
2343 					ibqp->qp_type, attr_mask)) {
2344 			DP_ERR(dev,
2345 			       "modify qp: invalid attribute mask=0x%x specified for\n"
2346 			       "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
2347 			       attr_mask, qp->qp_id, ibqp->qp_type,
2348 			       old_qp_state, new_qp_state);
2349 			rc = -EINVAL;
2350 			goto err;
2351 		}
2352 	}
2353 
2354 	/* Translate the masks... */
2355 	if (attr_mask & IB_QP_STATE) {
2356 		SET_FIELD(qp_params.modify_flags,
2357 			  QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
2358 		qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
2359 	}
2360 
2361 	if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
2362 		qp_params.sqd_async = true;
2363 
2364 	if (attr_mask & IB_QP_PKEY_INDEX) {
2365 		SET_FIELD(qp_params.modify_flags,
2366 			  QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
2367 		if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
2368 			rc = -EINVAL;
2369 			goto err;
2370 		}
2371 
2372 		qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2373 	}
2374 
2375 	if (attr_mask & IB_QP_QKEY)
2376 		qp->qkey = attr->qkey;
2377 
2378 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
2379 		SET_FIELD(qp_params.modify_flags,
2380 			  QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2381 		qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2382 						  IB_ACCESS_REMOTE_READ;
2383 		qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2384 						   IB_ACCESS_REMOTE_WRITE;
2385 		qp_params.incoming_atomic_en = attr->qp_access_flags &
2386 					       IB_ACCESS_REMOTE_ATOMIC;
2387 	}
2388 
2389 	if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
2390 		if (rdma_protocol_iwarp(&dev->ibdev, 1))
2391 			return -EINVAL;
2392 
2393 		if (attr_mask & IB_QP_PATH_MTU) {
2394 			if (attr->path_mtu < IB_MTU_256 ||
2395 			    attr->path_mtu > IB_MTU_4096) {
2396 				pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2397 				rc = -EINVAL;
2398 				goto err;
2399 			}
2400 			qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2401 				      ib_mtu_enum_to_int(iboe_get_mtu
2402 							 (dev->ndev->mtu)));
2403 		}
2404 
2405 		if (!qp->mtu) {
2406 			qp->mtu =
2407 			ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2408 			pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2409 		}
2410 
2411 		SET_FIELD(qp_params.modify_flags,
2412 			  QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2413 
2414 		qp_params.traffic_class_tos = grh->traffic_class;
2415 		qp_params.flow_label = grh->flow_label;
2416 		qp_params.hop_limit_ttl = grh->hop_limit;
2417 
2418 		qp->sgid_idx = grh->sgid_index;
2419 
2420 		rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2421 		if (rc) {
2422 			DP_ERR(dev,
2423 			       "modify qp: problems with GID index %d (rc=%d)\n",
2424 			       grh->sgid_index, rc);
2425 			return rc;
2426 		}
2427 
2428 		rc = qedr_get_dmac(dev, &attr->ah_attr,
2429 				   qp_params.remote_mac_addr);
2430 		if (rc)
2431 			return rc;
2432 
2433 		qp_params.use_local_mac = true;
2434 		ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2435 
2436 		DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2437 			 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2438 			 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2439 		DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2440 			 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2441 			 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2442 		DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2443 			 qp_params.remote_mac_addr);
2444 
2445 		qp_params.mtu = qp->mtu;
2446 		qp_params.lb_indication = false;
2447 	}
2448 
2449 	if (!qp_params.mtu) {
2450 		/* Stay with current MTU */
2451 		if (qp->mtu)
2452 			qp_params.mtu = qp->mtu;
2453 		else
2454 			qp_params.mtu =
2455 			    ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2456 	}
2457 
2458 	if (attr_mask & IB_QP_TIMEOUT) {
2459 		SET_FIELD(qp_params.modify_flags,
2460 			  QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2461 
2462 		/* The received timeout value is an exponent used like this:
2463 		 *    "12.7.34 LOCAL ACK TIMEOUT
2464 		 *    Value representing the transport (ACK) timeout for use by
2465 		 *    the remote, expressed as: 4.096 * 2^timeout [usec]"
2466 		 * The FW expects timeout in msec so we need to divide the usec
2467 		 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2468 		 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2469 		 * A timeout value of zero means infinite (handled below); we
2470 		 * use 'max_t' so that sub-1 msec values are configured as 1 msec.
2471 		 */
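		/* For example: attr->timeout = 14 means 4.096 usec * 2^14,
		 * roughly 67 msec; the approximation below yields
		 * 1 << (14 - 8) = 64 msec. A small exponent such as 5
		 * (~131 usec) is clamped by the max_t() and configured as 1 msec.
		 */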
2472 		if (attr->timeout)
2473 			qp_params.ack_timeout =
2474 					1 << max_t(int, attr->timeout - 8, 0);
2475 		else
2476 			qp_params.ack_timeout = 0;
2477 	}
2478 
2479 	if (attr_mask & IB_QP_RETRY_CNT) {
2480 		SET_FIELD(qp_params.modify_flags,
2481 			  QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2482 		qp_params.retry_cnt = attr->retry_cnt;
2483 	}
2484 
2485 	if (attr_mask & IB_QP_RNR_RETRY) {
2486 		SET_FIELD(qp_params.modify_flags,
2487 			  QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2488 		qp_params.rnr_retry_cnt = attr->rnr_retry;
2489 	}
2490 
2491 	if (attr_mask & IB_QP_RQ_PSN) {
2492 		SET_FIELD(qp_params.modify_flags,
2493 			  QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2494 		qp_params.rq_psn = attr->rq_psn;
2495 		qp->rq_psn = attr->rq_psn;
2496 	}
2497 
2498 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2499 		if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2500 			rc = -EINVAL;
2501 			DP_ERR(dev,
2502 			       "unsupported max_rd_atomic=%d, supported=%d\n",
2503 			       attr->max_rd_atomic,
2504 			       dev->attr.max_qp_req_rd_atomic_resc);
2505 			goto err;
2506 		}
2507 
2508 		SET_FIELD(qp_params.modify_flags,
2509 			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2510 		qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2511 	}
2512 
2513 	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2514 		SET_FIELD(qp_params.modify_flags,
2515 			  QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2516 		qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2517 	}
2518 
2519 	if (attr_mask & IB_QP_SQ_PSN) {
2520 		SET_FIELD(qp_params.modify_flags,
2521 			  QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2522 		qp_params.sq_psn = attr->sq_psn;
2523 		qp->sq_psn = attr->sq_psn;
2524 	}
2525 
2526 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2527 		if (attr->max_dest_rd_atomic >
2528 		    dev->attr.max_qp_resp_rd_atomic_resc) {
2529 			DP_ERR(dev,
2530 			       "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2531 			       attr->max_dest_rd_atomic,
2532 			       dev->attr.max_qp_resp_rd_atomic_resc);
2533 
2534 			rc = -EINVAL;
2535 			goto err;
2536 		}
2537 
2538 		SET_FIELD(qp_params.modify_flags,
2539 			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2540 		qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2541 	}
2542 
2543 	if (attr_mask & IB_QP_DEST_QPN) {
2544 		SET_FIELD(qp_params.modify_flags,
2545 			  QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2546 
2547 		qp_params.dest_qp = attr->dest_qp_num;
2548 		qp->dest_qp_num = attr->dest_qp_num;
2549 	}
2550 
2551 	cur_state = qp->state;
2552 
2553 	/* Update the QP state before the actual ramrod to prevent a race with
2554 	 * fast path. Modifying the QP state to error will cause the device to
2555 	 * flush the CQEs; if the flushed CQEs are polled while the QP is not
2556 	 * yet in the error state, they would be treated as a potential issue.
2557 	 */
2558 	if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2559 	    !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2560 		qp->state = QED_ROCE_QP_STATE_ERR;
2561 
2562 	if (qp->qp_type != IB_QPT_GSI)
2563 		rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2564 					      qp->qed_qp, &qp_params);
2565 
2566 	if (attr_mask & IB_QP_STATE) {
2567 		if ((qp->qp_type != IB_QPT_GSI) && (!udata))
2568 			rc = qedr_update_qp_state(dev, qp, cur_state,
2569 						  qp_params.new_state);
2570 		qp->state = qp_params.new_state;
2571 	}
2572 
2573 err:
2574 	return rc;
2575 }
2576 
2577 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2578 {
2579 	int ib_qp_acc_flags = 0;
2580 
2581 	if (params->incoming_rdma_write_en)
2582 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2583 	if (params->incoming_rdma_read_en)
2584 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2585 	if (params->incoming_atomic_en)
2586 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2587 	ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2588 	return ib_qp_acc_flags;
2589 }
2590 
2591 int qedr_query_qp(struct ib_qp *ibqp,
2592 		  struct ib_qp_attr *qp_attr,
2593 		  int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2594 {
2595 	struct qed_rdma_query_qp_out_params params;
2596 	struct qedr_qp *qp = get_qedr_qp(ibqp);
2597 	struct qedr_dev *dev = qp->dev;
2598 	int rc = 0;
2599 
2600 	memset(&params, 0, sizeof(params));
2601 
2602 	rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2603 	if (rc)
2604 		goto err;
2605 
2606 	memset(qp_attr, 0, sizeof(*qp_attr));
2607 	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2608 
2609 	qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2610 	qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2611 	qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2612 	qp_attr->path_mig_state = IB_MIG_MIGRATED;
2613 	qp_attr->rq_psn = params.rq_psn;
2614 	qp_attr->sq_psn = params.sq_psn;
2615 	qp_attr->dest_qp_num = params.dest_qp;
2616 
2617 	qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2618 
2619 	qp_attr->cap.max_send_wr = qp->sq.max_wr;
2620 	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2621 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
2622 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2623 	qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
2624 	qp_init_attr->cap = qp_attr->cap;
2625 
2626 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2627 	rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2628 			params.flow_label, qp->sgid_idx,
2629 			params.hop_limit_ttl, params.traffic_class_tos);
2630 	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2631 	rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2632 	rdma_ah_set_sl(&qp_attr->ah_attr, 0);
2633 	qp_attr->timeout = params.timeout;
2634 	qp_attr->rnr_retry = params.rnr_retry;
2635 	qp_attr->retry_cnt = params.retry_cnt;
2636 	qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2637 	qp_attr->pkey_index = params.pkey_index;
2638 	qp_attr->port_num = 1;
2639 	rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2640 	rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
2641 	qp_attr->alt_pkey_index = 0;
2642 	qp_attr->alt_port_num = 0;
2643 	qp_attr->alt_timeout = 0;
2644 	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2645 
2646 	qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2647 	qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2648 	qp_attr->max_rd_atomic = params.max_rd_atomic;
2649 	qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2650 
2651 	DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2652 		 qp_attr->cap.max_inline_data);
2653 
2654 err:
2655 	return rc;
2656 }
2657 
2658 static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
2659 				  struct ib_udata *udata)
2660 {
2661 	struct qedr_ucontext *ctx =
2662 		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
2663 					  ibucontext);
2664 	int rc;
2665 
2666 	if (qp->qp_type != IB_QPT_GSI) {
2667 		rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2668 		if (rc)
2669 			return rc;
2670 	}
2671 
2672 	if (qp->create_type == QEDR_QP_CREATE_USER)
2673 		qedr_cleanup_user(dev, ctx, qp);
2674 	else
2675 		qedr_cleanup_kernel(dev, qp);
2676 
2677 	return 0;
2678 }
2679 
2680 int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
2681 {
2682 	struct qedr_qp *qp = get_qedr_qp(ibqp);
2683 	struct qedr_dev *dev = qp->dev;
2684 	struct ib_qp_attr attr;
2685 	int attr_mask = 0;
2686 
2687 	DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2688 		 qp, qp->qp_type);
2689 
2690 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
2691 		if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2692 		    (qp->state != QED_ROCE_QP_STATE_ERR) &&
2693 		    (qp->state != QED_ROCE_QP_STATE_INIT)) {
2694 
2695 			attr.qp_state = IB_QPS_ERR;
2696 			attr_mask |= IB_QP_STATE;
2697 
2698 			/* Change the QP state to ERROR */
2699 			qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2700 		}
2701 	} else {
2702 		/* If connection establishment started, the WAIT_FOR_CONNECT
2703 		 * bit will be on and we need to wait for the establishment
2704 		 * to complete before destroying the QP.
2705 		 */
2706 		if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
2707 				     &qp->iwarp_cm_flags))
2708 			wait_for_completion(&qp->iwarp_cm_comp);
2709 
2710 		/* If graceful disconnect started, the WAIT_FOR_DISCONNECT
2711 		 * bit will be on, and we need to wait for the disconnect to
2712 		 * complete before continuing. We can use the same completion,
2713 		 * iwarp_cm_comp, since this is the only place that waits for
2714 		 * this completion and it is sequential. In addition,
2715 		 * disconnect can't occur before the connection is fully
2716 		 * established, therefore if WAIT_FOR_DISCONNECT is on it
2717 		 * means WAIT_FOR_CONNECT is also on and the completion for
2718 		 * CONNECT already occurred.
2719 		 */
2720 		if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
2721 				     &qp->iwarp_cm_flags))
2722 			wait_for_completion(&qp->iwarp_cm_comp);
2723 	}
2724 
2725 	if (qp->qp_type == IB_QPT_GSI)
2726 		qedr_destroy_gsi_qp(dev);
2727 
2728 	/* We need to remove the entry from the xarray before we release the
2729 	 * qp_id, to avoid a race where the qp_id gets reallocated and the
2730 	 * subsequent xa_insert fails.
2731 	 */
2732 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
2733 		xa_erase(&dev->qps, qp->qp_id);
2734 
2735 	qedr_free_qp_resources(dev, qp, udata);
2736 
2737 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
2738 		qedr_iw_qp_rem_ref(&qp->ibqp);
2739 
2740 	return 0;
2741 }
2742 
2743 int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
2744 		   struct ib_udata *udata)
2745 {
2746 	struct qedr_ah *ah = get_qedr_ah(ibah);
2747 
2748 	rdma_copy_ah_attr(&ah->attr, attr);
2749 
2750 	return 0;
2751 }
2752 
2753 void qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
2754 {
2755 	struct qedr_ah *ah = get_qedr_ah(ibah);
2756 
2757 	rdma_destroy_ah_attr(&ah->attr);
2758 }
2759 
2760 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2761 {
2762 	struct qedr_pbl *pbl, *tmp;
2763 
2764 	if (info->pbl_table)
2765 		list_add_tail(&info->pbl_table->list_entry,
2766 			      &info->free_pbl_list);
2767 
2768 	if (!list_empty(&info->inuse_pbl_list))
2769 		list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2770 
2771 	list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2772 		list_del(&pbl->list_entry);
2773 		qedr_free_pbl(dev, &info->pbl_info, pbl);
2774 	}
2775 }
2776 
2777 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2778 			size_t page_list_len, bool two_layered)
2779 {
2780 	struct qedr_pbl *tmp;
2781 	int rc;
2782 
2783 	INIT_LIST_HEAD(&info->free_pbl_list);
2784 	INIT_LIST_HEAD(&info->inuse_pbl_list);
2785 
2786 	rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2787 				  page_list_len, two_layered);
2788 	if (rc)
2789 		goto done;
2790 
2791 	info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2792 	if (IS_ERR(info->pbl_table)) {
2793 		rc = PTR_ERR(info->pbl_table);
2794 		goto done;
2795 	}
2796 
2797 	DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2798 		 &info->pbl_table->pa);
2799 
2800 	/* In the usual case we use 2 PBLs, so we add one to the free
2801 	 * list and allocate another one.
2802 	 */
2803 	tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2804 	if (IS_ERR(tmp)) {
2805 		DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2806 		goto done;
2807 	}
2808 
2809 	list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2810 
2811 	DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2812 
2813 done:
2814 	if (rc)
2815 		free_mr_info(dev, info);
2816 
2817 	return rc;
2818 }
2819 
2820 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2821 			       u64 usr_addr, int acc, struct ib_udata *udata)
2822 {
2823 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2824 	struct qedr_mr *mr;
2825 	struct qedr_pd *pd;
2826 	int rc = -ENOMEM;
2827 
2828 	pd = get_qedr_pd(ibpd);
2829 	DP_DEBUG(dev, QEDR_MSG_MR,
2830 		 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2831 		 pd->pd_id, start, len, usr_addr, acc);
2832 
2833 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2834 		return ERR_PTR(-EINVAL);
2835 
2836 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2837 	if (!mr)
2838 		return ERR_PTR(rc);
2839 
2840 	mr->type = QEDR_MR_USER;
2841 
2842 	mr->umem = ib_umem_get(udata, start, len, acc);
2843 	if (IS_ERR(mr->umem)) {
2844 		rc = -EFAULT;
2845 		goto err0;
2846 	}
2847 
2848 	rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2849 	if (rc)
2850 		goto err1;
2851 
2852 	qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2853 			   &mr->info.pbl_info, PAGE_SHIFT);
2854 
2855 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2856 	if (rc) {
2857 		DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2858 		goto err1;
2859 	}
2860 
2861 	/* Index only, 18 bits long, lkey = itid << 8 | key */
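	/* e.g. an itid of 0x12 with key 0 produces lkey 0x1200 below */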
2862 	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2863 	mr->hw_mr.key = 0;
2864 	mr->hw_mr.pd = pd->pd_id;
2865 	mr->hw_mr.local_read = 1;
2866 	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2867 	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2868 	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2869 	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2870 	mr->hw_mr.mw_bind = false;
2871 	mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2872 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2873 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2874 	mr->hw_mr.page_size_log = PAGE_SHIFT;
2875 	mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2876 	mr->hw_mr.length = len;
2877 	mr->hw_mr.vaddr = usr_addr;
2878 	mr->hw_mr.zbva = false;
2879 	mr->hw_mr.phy_mr = false;
2880 	mr->hw_mr.dma_mr = false;
2881 
2882 	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2883 	if (rc) {
2884 		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2885 		goto err2;
2886 	}
2887 
2888 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2889 	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2890 	    mr->hw_mr.remote_atomic)
2891 		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2892 
2893 	DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2894 		 mr->ibmr.lkey);
2895 	return &mr->ibmr;
2896 
2897 err2:
2898 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2899 err1:
2900 	qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2901 err0:
2902 	kfree(mr);
2903 	return ERR_PTR(rc);
2904 }
2905 
2906 int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
2907 {
2908 	struct qedr_mr *mr = get_qedr_mr(ib_mr);
2909 	struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2910 	int rc = 0;
2911 
2912 	rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2913 	if (rc)
2914 		return rc;
2915 
2916 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2917 
2918 	if (mr->type != QEDR_MR_DMA)
2919 		free_mr_info(dev, &mr->info);
2920 
2921 	/* it could be user registered memory. */
2922 	ib_umem_release(mr->umem);
2923 
2924 	kfree(mr);
2925 
2926 	return rc;
2927 }
2928 
2929 static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2930 				       int max_page_list_len)
2931 {
2932 	struct qedr_pd *pd = get_qedr_pd(ibpd);
2933 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2934 	struct qedr_mr *mr;
2935 	int rc = -ENOMEM;
2936 
2937 	DP_DEBUG(dev, QEDR_MSG_MR,
2938 		 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2939 		 max_page_list_len);
2940 
2941 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2942 	if (!mr)
2943 		return ERR_PTR(rc);
2944 
2945 	mr->dev = dev;
2946 	mr->type = QEDR_MR_FRMR;
2947 
2948 	rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2949 	if (rc)
2950 		goto err0;
2951 
2952 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2953 	if (rc) {
2954 		DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2955 		goto err0;
2956 	}
2957 
2958 	/* Index only, 18 bits long, lkey = itid << 8 | key */
2959 	mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2960 	mr->hw_mr.key = 0;
2961 	mr->hw_mr.pd = pd->pd_id;
2962 	mr->hw_mr.local_read = 1;
2963 	mr->hw_mr.local_write = 0;
2964 	mr->hw_mr.remote_read = 0;
2965 	mr->hw_mr.remote_write = 0;
2966 	mr->hw_mr.remote_atomic = 0;
2967 	mr->hw_mr.mw_bind = false;
2968 	mr->hw_mr.pbl_ptr = 0;
2969 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2970 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2971 	mr->hw_mr.fbo = 0;
2972 	mr->hw_mr.length = 0;
2973 	mr->hw_mr.vaddr = 0;
2974 	mr->hw_mr.zbva = false;
2975 	mr->hw_mr.phy_mr = true;
2976 	mr->hw_mr.dma_mr = false;
2977 
2978 	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2979 	if (rc) {
2980 		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2981 		goto err1;
2982 	}
2983 
2984 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2985 	mr->ibmr.rkey = mr->ibmr.lkey;
2986 
2987 	DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2988 	return mr;
2989 
2990 err1:
2991 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2992 err0:
2993 	kfree(mr);
2994 	return ERR_PTR(rc);
2995 }
2996 
2997 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
2998 			    u32 max_num_sg, struct ib_udata *udata)
2999 {
3000 	struct qedr_mr *mr;
3001 
3002 	if (mr_type != IB_MR_TYPE_MEM_REG)
3003 		return ERR_PTR(-EINVAL);
3004 
3005 	mr = __qedr_alloc_mr(ibpd, max_num_sg);
3006 
3007 	if (IS_ERR(mr))
3008 		return ERR_PTR(-EINVAL);
3009 
3010 	return &mr->ibmr;
3011 }
3012 
3013 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
3014 {
3015 	struct qedr_mr *mr = get_qedr_mr(ibmr);
3016 	struct qedr_pbl *pbl_table;
3017 	struct regpair *pbe;
3018 	u32 pbes_in_page;
3019 
3020 	if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
3021 		DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
3022 		return -ENOMEM;
3023 	}
3024 
3025 	DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
3026 		 mr->npages, addr);
3027 
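	/* For illustration, assuming a 4 KB PBL page: pbes_in_page would be
	 * 4096 / sizeof(u64) = 512, so page number 1000 lands in
	 * pbl_table[1000 / 512 = 1] at PBE offset 1000 % 512 = 488.
	 */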
3028 	pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
3029 	pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
3030 	pbe = (struct regpair *)pbl_table->va;
3031 	pbe +=  mr->npages % pbes_in_page;
3032 	pbe->lo = cpu_to_le32((u32)addr);
3033 	pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
3034 
3035 	mr->npages++;
3036 
3037 	return 0;
3038 }
3039 
3040 static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
3041 {
3042 	int work = info->completed - info->completed_handled - 1;
3043 
3044 	DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
3045 	while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
3046 		struct qedr_pbl *pbl;
3047 
3048 		/* Free all the page lists that can be freed
3049 		 * (all the ones that were invalidated), under the assumption
3050 		 * that if an FMR completed successfully, any invalidate
3051 		 * operation issued before it has completed as well.
3052 		 */
3053 		pbl = list_first_entry(&info->inuse_pbl_list,
3054 				       struct qedr_pbl, list_entry);
3055 		list_move_tail(&pbl->list_entry, &info->free_pbl_list);
3056 		info->completed_handled++;
3057 	}
3058 }
3059 
3060 int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
3061 		   int sg_nents, unsigned int *sg_offset)
3062 {
3063 	struct qedr_mr *mr = get_qedr_mr(ibmr);
3064 
3065 	mr->npages = 0;
3066 
3067 	handle_completed_mrs(mr->dev, &mr->info);
3068 	return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
3069 }
3070 
3071 struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
3072 {
3073 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3074 	struct qedr_pd *pd = get_qedr_pd(ibpd);
3075 	struct qedr_mr *mr;
3076 	int rc;
3077 
3078 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3079 	if (!mr)
3080 		return ERR_PTR(-ENOMEM);
3081 
3082 	mr->type = QEDR_MR_DMA;
3083 
3084 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3085 	if (rc) {
3086 		DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
3087 		goto err1;
3088 	}
3089 
3090 	/* index only, 18 bits long, lkey = itid << 8 | key */
3091 	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3092 	mr->hw_mr.pd = pd->pd_id;
3093 	mr->hw_mr.local_read = 1;
3094 	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3095 	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3096 	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3097 	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3098 	mr->hw_mr.dma_mr = true;
3099 
3100 	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3101 	if (rc) {
3102 		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3103 		goto err2;
3104 	}
3105 
3106 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3107 	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3108 	    mr->hw_mr.remote_atomic)
3109 		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3110 
3111 	DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
3112 	return &mr->ibmr;
3113 
3114 err2:
3115 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3116 err1:
3117 	kfree(mr);
3118 	return ERR_PTR(rc);
3119 }
3120 
3121 static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
3122 {
3123 	return (((wq->prod + 1) % wq->max_wr) == wq->cons);
3124 }
3125 
3126 static int sge_data_len(struct ib_sge *sg_list, int num_sge)
3127 {
3128 	int i, len = 0;
3129 
3130 	for (i = 0; i < num_sge; i++)
3131 		len += sg_list[i].length;
3132 
3133 	return len;
3134 }
3135 
3136 static void swap_wqe_data64(u64 *p)
3137 {
3138 	int i;
3139 
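	/* Chaining cpu_to_le64() and cpu_to_be64() amounts to an unconditional
	 * byte swap of each 64-bit word in the WQE segment, regardless of
	 * host endianness.
	 */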
3140 	for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
3141 		*p = cpu_to_be64(cpu_to_le64(*p));
3142 }
3143 
3144 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
3145 				       struct qedr_qp *qp, u8 *wqe_size,
3146 				       const struct ib_send_wr *wr,
3147 				       const struct ib_send_wr **bad_wr,
3148 				       u8 *bits, u8 bit)
3149 {
3150 	u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
3151 	char *seg_prt, *wqe;
3152 	int i, seg_siz;
3153 
3154 	if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
3155 		DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
3156 		*bad_wr = wr;
3157 		return 0;
3158 	}
3159 
3160 	if (!data_size)
3161 		return data_size;
3162 
3163 	*bits |= bit;
3164 
3165 	seg_prt = NULL;
3166 	wqe = NULL;
3167 	seg_siz = 0;
3168 
3169 	/* Copy data inline */
3170 	for (i = 0; i < wr->num_sge; i++) {
3171 		u32 len = wr->sg_list[i].length;
3172 		void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
3173 
3174 		while (len > 0) {
3175 			u32 cur;
3176 
3177 			/* New segment required */
3178 			if (!seg_siz) {
3179 				wqe = (char *)qed_chain_produce(&qp->sq.pbl);
3180 				seg_prt = wqe;
3181 				seg_siz = sizeof(struct rdma_sq_common_wqe);
3182 				(*wqe_size)++;
3183 			}
3184 
3185 			/* Calculate currently allowed length */
3186 			cur = min_t(u32, len, seg_siz);
3187 			memcpy(seg_prt, src, cur);
3188 
3189 			/* Update segment variables */
3190 			seg_prt += cur;
3191 			seg_siz -= cur;
3192 
3193 			/* Update sge variables */
3194 			src += cur;
3195 			len -= cur;
3196 
3197 			/* Swap fully-completed segments */
3198 			if (!seg_siz)
3199 				swap_wqe_data64((u64 *)wqe);
3200 		}
3201 	}
3202 
3203 	/* swap the last, partially completed segment */
3204 	if (seg_siz)
3205 		swap_wqe_data64((u64 *)wqe);
3206 
3207 	return data_size;
3208 }
3209 
3210 #define RQ_SGE_SET(sge, vaddr, vlength, vflags)			\
3211 	do {							\
3212 		DMA_REGPAIR_LE(sge->addr, vaddr);		\
3213 		(sge)->length = cpu_to_le32(vlength);		\
3214 		(sge)->flags = cpu_to_le32(vflags);		\
3215 	} while (0)
3216 
3217 #define SRQ_HDR_SET(hdr, vwr_id, num_sge)			\
3218 	do {							\
3219 		DMA_REGPAIR_LE(hdr->wr_id, vwr_id);		\
3220 		(hdr)->num_sges = num_sge;			\
3221 	} while (0)
3222 
3223 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)			\
3224 	do {							\
3225 		DMA_REGPAIR_LE(sge->addr, vaddr);		\
3226 		(sge)->length = cpu_to_le32(vlength);		\
3227 		(sge)->l_key = cpu_to_le32(vlkey);		\
3228 	} while (0)
3229 
3230 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
3231 				const struct ib_send_wr *wr)
3232 {
3233 	u32 data_size = 0;
3234 	int i;
3235 
3236 	for (i = 0; i < wr->num_sge; i++) {
3237 		struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
3238 
3239 		DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
3240 		sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
3241 		sge->length = cpu_to_le32(wr->sg_list[i].length);
3242 		data_size += wr->sg_list[i].length;
3243 	}
3244 
3245 	if (wqe_size)
3246 		*wqe_size += wr->num_sge;
3247 
3248 	return data_size;
3249 }
3250 
3251 static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
3252 				     struct qedr_qp *qp,
3253 				     struct rdma_sq_rdma_wqe_1st *rwqe,
3254 				     struct rdma_sq_rdma_wqe_2nd *rwqe2,
3255 				     const struct ib_send_wr *wr,
3256 				     const struct ib_send_wr **bad_wr)
3257 {
3258 	rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
3259 	DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
3260 
3261 	if (wr->send_flags & IB_SEND_INLINE &&
3262 	    (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
3263 	     wr->opcode == IB_WR_RDMA_WRITE)) {
3264 		u8 flags = 0;
3265 
3266 		SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
3267 		return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
3268 						   bad_wr, &rwqe->flags, flags);
3269 	}
3270 
3271 	return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
3272 }
3273 
3274 static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
3275 				     struct qedr_qp *qp,
3276 				     struct rdma_sq_send_wqe_1st *swqe,
3277 				     struct rdma_sq_send_wqe_2st *swqe2,
3278 				     const struct ib_send_wr *wr,
3279 				     const struct ib_send_wr **bad_wr)
3280 {
3281 	memset(swqe2, 0, sizeof(*swqe2));
3282 	if (wr->send_flags & IB_SEND_INLINE) {
3283 		u8 flags = 0;
3284 
3285 		SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
3286 		return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
3287 						   bad_wr, &swqe->flags, flags);
3288 	}
3289 
3290 	return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
3291 }
3292 
3293 static int qedr_prepare_reg(struct qedr_qp *qp,
3294 			    struct rdma_sq_fmr_wqe_1st *fwqe1,
3295 			    const struct ib_reg_wr *wr)
3296 {
3297 	struct qedr_mr *mr = get_qedr_mr(wr->mr);
3298 	struct rdma_sq_fmr_wqe_2nd *fwqe2;
3299 
3300 	fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
3301 	fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
3302 	fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
3303 	fwqe1->l_key = wr->key;
3304 
3305 	fwqe2->access_ctrl = 0;
3306 
3307 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
3308 		   !!(wr->access & IB_ACCESS_REMOTE_READ));
3309 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
3310 		   !!(wr->access & IB_ACCESS_REMOTE_WRITE));
3311 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
3312 		   !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
3313 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
3314 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
3315 		   !!(wr->access & IB_ACCESS_LOCAL_WRITE));
3316 	fwqe2->fmr_ctrl = 0;
3317 
3318 	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
3319 		   ilog2(mr->ibmr.page_size) - 12);
3320 
3321 	fwqe2->length_hi = 0;
3322 	fwqe2->length_lo = mr->ibmr.length;
3323 	fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
3324 	fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
3325 
3326 	qp->wqe_wr_id[qp->sq.prod].mr = mr;
3327 
3328 	return 0;
3329 }
3330 
3331 static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
3332 {
3333 	switch (opcode) {
3334 	case IB_WR_RDMA_WRITE:
3335 	case IB_WR_RDMA_WRITE_WITH_IMM:
3336 		return IB_WC_RDMA_WRITE;
3337 	case IB_WR_SEND_WITH_IMM:
3338 	case IB_WR_SEND:
3339 	case IB_WR_SEND_WITH_INV:
3340 		return IB_WC_SEND;
3341 	case IB_WR_RDMA_READ:
3342 	case IB_WR_RDMA_READ_WITH_INV:
3343 		return IB_WC_RDMA_READ;
3344 	case IB_WR_ATOMIC_CMP_AND_SWP:
3345 		return IB_WC_COMP_SWAP;
3346 	case IB_WR_ATOMIC_FETCH_AND_ADD:
3347 		return IB_WC_FETCH_ADD;
3348 	case IB_WR_REG_MR:
3349 		return IB_WC_REG_MR;
3350 	case IB_WR_LOCAL_INV:
3351 		return IB_WC_LOCAL_INV;
3352 	default:
3353 		return IB_WC_SEND;
3354 	}
3355 }
3356 
3357 static inline bool qedr_can_post_send(struct qedr_qp *qp,
3358 				      const struct ib_send_wr *wr)
3359 {
3360 	int wq_is_full, err_wr, pbl_is_full;
3361 	struct qedr_dev *dev = qp->dev;
3362 
3363 	/* prevent SQ overflow and/or processing of a bad WR */
3364 	err_wr = wr->num_sge > qp->sq.max_sges;
3365 	wq_is_full = qedr_wq_is_full(&qp->sq);
3366 	pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
3367 		      QEDR_MAX_SQE_ELEMENTS_PER_SQE;
3368 	if (wq_is_full || err_wr || pbl_is_full) {
3369 		if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
3370 			DP_ERR(dev,
3371 			       "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
3372 			       qp);
3373 			qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
3374 		}
3375 
3376 		if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
3377 			DP_ERR(dev,
3378 			       "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
3379 			       qp);
3380 			qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
3381 		}
3382 
3383 		if (pbl_is_full &&
3384 		    !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
3385 			DP_ERR(dev,
3386 			       "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
3387 			       qp);
3388 			qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
3389 		}
3390 		return false;
3391 	}
3392 	return true;
3393 }
3394 
3395 static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3396 			    const struct ib_send_wr **bad_wr)
3397 {
3398 	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3399 	struct qedr_qp *qp = get_qedr_qp(ibqp);
3400 	struct rdma_sq_atomic_wqe_1st *awqe1;
3401 	struct rdma_sq_atomic_wqe_2nd *awqe2;
3402 	struct rdma_sq_atomic_wqe_3rd *awqe3;
3403 	struct rdma_sq_send_wqe_2st *swqe2;
3404 	struct rdma_sq_local_inv_wqe *iwqe;
3405 	struct rdma_sq_rdma_wqe_2nd *rwqe2;
3406 	struct rdma_sq_send_wqe_1st *swqe;
3407 	struct rdma_sq_rdma_wqe_1st *rwqe;
3408 	struct rdma_sq_fmr_wqe_1st *fwqe1;
3409 	struct rdma_sq_common_wqe *wqe;
3410 	u32 length;
3411 	int rc = 0;
3412 	bool comp;
3413 
3414 	if (!qedr_can_post_send(qp, wr)) {
3415 		*bad_wr = wr;
3416 		return -ENOMEM;
3417 	}
3418 
3419 	wqe = qed_chain_produce(&qp->sq.pbl);
3420 	qp->wqe_wr_id[qp->sq.prod].signaled =
3421 		!!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3422 
3423 	wqe->flags = 0;
3424 	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3425 		   !!(wr->send_flags & IB_SEND_SOLICITED));
3426 	comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3427 	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3428 	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3429 		   !!(wr->send_flags & IB_SEND_FENCE));
3430 	wqe->prev_wqe_size = qp->prev_wqe_size;
3431 
3432 	qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3433 
3434 	switch (wr->opcode) {
3435 	case IB_WR_SEND_WITH_IMM:
3436 		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3437 			rc = -EINVAL;
3438 			*bad_wr = wr;
3439 			break;
3440 		}
3441 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3442 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3443 		swqe->wqe_size = 2;
3444 		swqe2 = qed_chain_produce(&qp->sq.pbl);
3445 
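		/* wr->ex.imm_data is carried in big-endian (network) order;
		 * convert it to the little-endian layout of the WQE field.
		 */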
3446 		swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3447 		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3448 						   wr, bad_wr);
3449 		swqe->length = cpu_to_le32(length);
3450 		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3451 		qp->prev_wqe_size = swqe->wqe_size;
3452 		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3453 		break;
3454 	case IB_WR_SEND:
3455 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3456 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3457 
3458 		swqe->wqe_size = 2;
3459 		swqe2 = qed_chain_produce(&qp->sq.pbl);
3460 		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3461 						   wr, bad_wr);
3462 		swqe->length = cpu_to_le32(length);
3463 		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3464 		qp->prev_wqe_size = swqe->wqe_size;
3465 		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3466 		break;
3467 	case IB_WR_SEND_WITH_INV:
3468 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3469 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3470 		swqe2 = qed_chain_produce(&qp->sq.pbl);
3471 		swqe->wqe_size = 2;
3472 		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3473 		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3474 						   wr, bad_wr);
3475 		swqe->length = cpu_to_le32(length);
3476 		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3477 		qp->prev_wqe_size = swqe->wqe_size;
3478 		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3479 		break;
3480 
3481 	case IB_WR_RDMA_WRITE_WITH_IMM:
3482 		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3483 			rc = -EINVAL;
3484 			*bad_wr = wr;
3485 			break;
3486 		}
3487 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3488 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3489 
3490 		rwqe->wqe_size = 2;
3491 		rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3492 		rwqe2 = qed_chain_produce(&qp->sq.pbl);
3493 		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3494 						   wr, bad_wr);
3495 		rwqe->length = cpu_to_le32(length);
3496 		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3497 		qp->prev_wqe_size = rwqe->wqe_size;
3498 		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3499 		break;
3500 	case IB_WR_RDMA_WRITE:
3501 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3502 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3503 
3504 		rwqe->wqe_size = 2;
3505 		rwqe2 = qed_chain_produce(&qp->sq.pbl);
3506 		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3507 						   wr, bad_wr);
3508 		rwqe->length = cpu_to_le32(length);
3509 		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3510 		qp->prev_wqe_size = rwqe->wqe_size;
3511 		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3512 		break;
3513 	case IB_WR_RDMA_READ_WITH_INV:
3514 		SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3515 		/* fallthrough -- handled identically to RDMA READ */
3516 
3517 	case IB_WR_RDMA_READ:
3518 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3519 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3520 
3521 		rwqe->wqe_size = 2;
3522 		rwqe2 = qed_chain_produce(&qp->sq.pbl);
3523 		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3524 						   wr, bad_wr);
3525 		rwqe->length = cpu_to_le32(length);
3526 		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3527 		qp->prev_wqe_size = rwqe->wqe_size;
3528 		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3529 		break;
3530 
3531 	case IB_WR_ATOMIC_CMP_AND_SWP:
3532 	case IB_WR_ATOMIC_FETCH_AND_ADD:
3533 		awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3534 		awqe1->wqe_size = 4;
3535 
3536 		awqe2 = qed_chain_produce(&qp->sq.pbl);
3537 		DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3538 		awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3539 
3540 		awqe3 = qed_chain_produce(&qp->sq.pbl);
3541 
3542 		if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3543 			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3544 			DMA_REGPAIR_LE(awqe3->swap_data,
3545 				       atomic_wr(wr)->compare_add);
3546 		} else {
3547 			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3548 			DMA_REGPAIR_LE(awqe3->swap_data,
3549 				       atomic_wr(wr)->swap);
3550 			DMA_REGPAIR_LE(awqe3->cmp_data,
3551 				       atomic_wr(wr)->compare_add);
3552 		}
3553 
3554 		qedr_prepare_sq_sges(qp, NULL, wr);
3555 
3556 		qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3557 		qp->prev_wqe_size = awqe1->wqe_size;
3558 		break;
3559 
3560 	case IB_WR_LOCAL_INV:
3561 		iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3562 		iwqe->wqe_size = 1;
3563 
3564 		iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3565 		iwqe->inv_l_key = wr->ex.invalidate_rkey;
3566 		qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3567 		qp->prev_wqe_size = iwqe->wqe_size;
3568 		break;
3569 	case IB_WR_REG_MR:
3570 		DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3571 		wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3572 		fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3573 		fwqe1->wqe_size = 2;
3574 
3575 		rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3576 		if (rc) {
3577 			DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3578 			*bad_wr = wr;
3579 			break;
3580 		}
3581 
3582 		qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3583 		qp->prev_wqe_size = fwqe1->wqe_size;
3584 		break;
3585 	default:
3586 		DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3587 		rc = -EINVAL;
3588 		*bad_wr = wr;
3589 		break;
3590 	}
3591 
3592 	if (*bad_wr) {
3593 		u16 value;
3594 
3595 		/* Restore prod to its position before
3596 		 * this WR was processed
3597 		 */
3598 		value = le16_to_cpu(qp->sq.db_data.data.value);
3599 		qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3600 
3601 		/* Restore prev_wqe_size */
3602 		qp->prev_wqe_size = wqe->prev_wqe_size;
3603 		rc = -EINVAL;
3604 		DP_ERR(dev, "POST SEND FAILED\n");
3605 	}
3606 
3607 	return rc;
3608 }
3609 
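/* Post a chain of send work requests on the SQ. GSI QPs are handed off
 * to the GSI path; for RoCE the QP must be in the RTS, SQD or ERR state.
 * The SQ doorbell is written once, after the whole chain has been
 * placed on the PBL.
 */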
3610 int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3611 		   const struct ib_send_wr **bad_wr)
3612 {
3613 	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3614 	struct qedr_qp *qp = get_qedr_qp(ibqp);
3615 	unsigned long flags;
3616 	int rc = 0;
3617 
3618 	*bad_wr = NULL;
3619 
3620 	if (qp->qp_type == IB_QPT_GSI)
3621 		return qedr_gsi_post_send(ibqp, wr, bad_wr);
3622 
3623 	spin_lock_irqsave(&qp->q_lock, flags);
3624 
3625 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
3626 		if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3627 		    (qp->state != QED_ROCE_QP_STATE_ERR) &&
3628 		    (qp->state != QED_ROCE_QP_STATE_SQD)) {
3629 			spin_unlock_irqrestore(&qp->q_lock, flags);
3630 			*bad_wr = wr;
3631 			DP_DEBUG(dev, QEDR_MSG_CQ,
3632 				 "QP in wrong state! QP icid=0x%x state %d\n",
3633 				 qp->icid, qp->state);
3634 			return -EINVAL;
3635 		}
3636 	}
3637 
3638 	while (wr) {
3639 		rc = __qedr_post_send(ibqp, wr, bad_wr);
3640 		if (rc)
3641 			break;
3642 
3643 		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3644 
3645 		qedr_inc_sw_prod(&qp->sq);
3646 
3647 		qp->sq.db_data.data.value++;
3648 
3649 		wr = wr->next;
3650 	}
3651 
3652 	/* Trigger doorbell
3653 	 * If the first WR failed, the doorbell is rung in vain.
3654 	 * However, this is not harmful (as long as the producer value
3655 	 * is unchanged). For performance reasons we avoid checking for
3656 	 * this redundant doorbell.
3657 	 *
3658 	 * qp->wqe_wr_id is accessed during qedr_poll_cq; as soon as
3659 	 * the doorbell is rung we could get a completion for this WR,
3660 	 * so the memory must be updated before the doorbell is
3661 	 * written.
3662 	 * During qedr_poll_cq, rmb is called before accessing the
3663 	 * cqe. That rmb also serves as the matching smp_rmb.
3664 	 */
3665 	smp_wmb();
3666 	writel(qp->sq.db_data.raw, qp->sq.db);
3667 
3668 	spin_unlock_irqrestore(&qp->q_lock, flags);
3669 
3670 	return rc;
3671 }
3672 
3673 static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
3674 {
3675 	u32 used;
3676 
3677 	/* Calculate the number of elements in use from the producer
3678 	 * and consumer counts, and subtract it from the maximum number
3679 	 * of work requests supported to get the number of elements left.
3680 	 */
3681 	used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
3682 
3683 	return hw_srq->max_wr - used;
3684 }
3685 
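/* Post receive work requests on an SRQ. Each WR consumes one header
 * element plus one chain element per SGE; the SGE/WQE producer pair in
 * host memory is then updated (with barriers) so that the firmware
 * sees the new WQEs.
 */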
3686 int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
3687 		       const struct ib_recv_wr **bad_wr)
3688 {
3689 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
3690 	struct qedr_srq_hwq_info *hw_srq;
3691 	struct qedr_dev *dev = srq->dev;
3692 	struct qed_chain *pbl;
3693 	unsigned long flags;
3694 	int status = 0;
3695 	u32 num_sge;
3696 	u32 offset;
3697 
3698 	spin_lock_irqsave(&srq->lock, flags);
3699 
3700 	hw_srq = &srq->hw_srq;
3701 	pbl = &srq->hw_srq.pbl;
3702 	while (wr) {
3703 		struct rdma_srq_wqe_header *hdr;
3704 		int i;
3705 
3706 		if (!qedr_srq_elem_left(hw_srq) ||
3707 		    wr->num_sge > srq->hw_srq.max_sges) {
3708 			DP_ERR(dev, "Can't post WR  (%d,%d) || (%d > %d)\n",
3709 			       hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
3710 			       wr->num_sge, srq->hw_srq.max_sges);
3711 			status = -ENOMEM;
3712 			*bad_wr = wr;
3713 			break;
3714 		}
3715 
3716 		hdr = qed_chain_produce(pbl);
3717 		num_sge = wr->num_sge;
3718 		/* Set number of sge and work request id in header */
3719 		SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
3720 
3721 		srq->hw_srq.wr_prod_cnt++;
3722 		hw_srq->wqe_prod++;
3723 		hw_srq->sge_prod++;
3724 
3725 		DP_DEBUG(dev, QEDR_MSG_SRQ,
3726 			 "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
3727 			 wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
3728 
3729 		for (i = 0; i < wr->num_sge; i++) {
3730 			struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
3731 
3732 			/* Set SGE length, lkey and address */
3733 			SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
3734 				    wr->sg_list[i].length, wr->sg_list[i].lkey);
3735 
3736 			DP_DEBUG(dev, QEDR_MSG_SRQ,
3737 				 "[%d]: len %d key %x addr %x:%x\n",
3738 				 i, srq_sge->length, srq_sge->l_key,
3739 				 srq_sge->addr.hi, srq_sge->addr.lo);
3740 			hw_srq->sge_prod++;
3741 		}
3742 
3743 		/* Flush WQE and SGE information before
3744 		 * updating producer.
3745 		 */
3746 		wmb();
3747 
3748 		/* The SRQ producer is 8 bytes: the SGE producer index goes
3749 		 * in the first 4 bytes and the WQE producer in the next
3750 		 * 4 bytes.
3751 		 */
3752 		*srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
3753 		offset = offsetof(struct rdma_srq_producers, wqe_prod);
3754 		*((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
3755 			hw_srq->wqe_prod;
3756 
3757 		/* Flush producer after updating it. */
3758 		wmb();
3759 		wr = wr->next;
3760 	}
3761 
3762 	DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
3763 		 qed_chain_get_elem_left(pbl));
3764 	spin_unlock_irqrestore(&srq->lock, flags);
3765 
3766 	return status;
3767 }
3768 
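/* Post receive work requests on a regular QP. GSI QPs are handed off to
 * the GSI path and nothing may be posted while the QP is in RESET. The
 * RQ doorbell (and, for iWARP, the second doorbell) is written once per
 * WR.
 */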
3769 int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3770 		   const struct ib_recv_wr **bad_wr)
3771 {
3772 	struct qedr_qp *qp = get_qedr_qp(ibqp);
3773 	struct qedr_dev *dev = qp->dev;
3774 	unsigned long flags;
3775 	int status = 0;
3776 
3777 	if (qp->qp_type == IB_QPT_GSI)
3778 		return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3779 
3780 	spin_lock_irqsave(&qp->q_lock, flags);
3781 
3782 	if (qp->state == QED_ROCE_QP_STATE_RESET) {
3783 		spin_unlock_irqrestore(&qp->q_lock, flags);
3784 		*bad_wr = wr;
3785 		return -EINVAL;
3786 	}
3787 
3788 	while (wr) {
3789 		int i;
3790 
3791 		if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3792 		    QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3793 		    wr->num_sge > qp->rq.max_sges) {
3794 			DP_ERR(dev, "Can't post WR  (%d < %d) || (%d > %d)\n",
3795 			       qed_chain_get_elem_left_u32(&qp->rq.pbl),
3796 			       QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3797 			       qp->rq.max_sges);
3798 			status = -ENOMEM;
3799 			*bad_wr = wr;
3800 			break;
3801 		}
3802 		for (i = 0; i < wr->num_sge; i++) {
3803 			u32 flags = 0;
3804 			struct rdma_rq_sge *rqe =
3805 			    qed_chain_produce(&qp->rq.pbl);
3806 
3807 			/* First one must include the number
3808 			 * of SGE in the list
3809 			 */
3810 			if (!i)
3811 				SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3812 					  wr->num_sge);
3813 
3814 			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
3815 				  wr->sg_list[i].lkey);
3816 
3817 			RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3818 				   wr->sg_list[i].length, flags);
3819 		}
3820 
3821 		/* Special case of no SGEs. The FW requires between 1-4 SGEs,
3822 		 * so in this case we post a single SGE with length zero. This is
3823 		 * needed because an RDMA write with immediate consumes an RQ element.
3824 		 */
3825 		if (!wr->num_sge) {
3826 			u32 flags = 0;
3827 			struct rdma_rq_sge *rqe =
3828 			    qed_chain_produce(&qp->rq.pbl);
3829 
3830 			/* First one must include the number
3831 			 * of SGE in the list
3832 			 */
3833 			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
3834 			SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3835 
3836 			RQ_SGE_SET(rqe, 0, 0, flags);
3837 			i = 1;
3838 		}
3839 
3840 		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3841 		qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3842 
3843 		qedr_inc_sw_prod(&qp->rq);
3844 
3845 		/* qp->rqe_wr_id is accessed during qedr_poll_cq, as
3846 		 * soon as we give the doorbell, we could get a completion
3847 		 * for this wr, therefore we need to make sure that the
3848 		 * memory is updated before giving the doorbell.
3849 		 * During qedr_poll_cq, rmb is called before accessing the
3850 		 * cqe. This covers for the smp_rmb as well.
3851 		 */
3852 		smp_wmb();
3853 
3854 		qp->rq.db_data.data.value++;
3855 
3856 		writel(qp->rq.db_data.raw, qp->rq.db);
3857 
3858 		if (rdma_protocol_iwarp(&dev->ibdev, 1))
3859 			writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3861 
3862 		wr = wr->next;
3863 	}
3864 
3865 	spin_unlock_irqrestore(&qp->q_lock, flags);
3866 
3867 	return status;
3868 }
3869 
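/* A CQE is ready for processing when its toggle bit matches the CQ's
 * current PBL toggle value.
 */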
3870 static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3871 {
3872 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
3873 
3874 	return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3875 		cq->pbl_toggle;
3876 }
3877 
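/* The CQE carries the qedr_qp pointer split across qp_handle.hi/lo;
 * reassemble it into a host pointer.
 */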
3878 static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3879 {
3880 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
3881 	struct qedr_qp *qp;
3882 
3883 	qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3884 						   resp_cqe->qp_handle.lo,
3885 						   u64);
3886 	return qp;
3887 }
3888 
3889 static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3890 {
3891 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
3892 
3893 	return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3894 }
3895 
3896 /* Return latest CQE (needs processing) */
3897 static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3898 {
3899 	return cq->latest_cqe;
3900 }
3901 
3902 /* For FMR we need to increase the completed counter used by the FMR
3903  * algorithm to determine whether a PBL can be freed or not.
3904  * This must be done whether or not the work request was signaled. For
3905  * that reason this function is called from the condition that checks
3906  * whether a WR should be skipped, so that an unsignaled FMR operation
3907  * is not missed.
3908  */
3909 static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3910 {
3911 	if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3912 		qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3913 }
3914 
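/* Consume SQ WQEs up to hw_cons and fill work completions for them with
 * the given status. Unsignaled WRs are skipped unless @force is set
 * (e.g. when flushing), although REG_MR completions are still counted
 * via qedr_chk_if_fmr(). Returns the number of WCs filled.
 */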
3915 static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3916 		       struct qedr_cq *cq, int num_entries,
3917 		       struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3918 		       int force)
3919 {
3920 	u16 cnt = 0;
3921 
3922 	while (num_entries && qp->sq.wqe_cons != hw_cons) {
3923 		if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3924 			qedr_chk_if_fmr(qp);
3925 			/* skip WC */
3926 			goto next_cqe;
3927 		}
3928 
3929 		/* fill WC */
3930 		wc->status = status;
3931 		wc->vendor_err = 0;
3932 		wc->wc_flags = 0;
3933 		wc->src_qp = qp->id;
3934 		wc->qp = &qp->ibqp;
3935 
3936 		wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3937 		wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3938 
3939 		switch (wc->opcode) {
3940 		case IB_WC_RDMA_WRITE:
3941 			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3942 			break;
3943 		case IB_WC_COMP_SWAP:
3944 		case IB_WC_FETCH_ADD:
3945 			wc->byte_len = 8;
3946 			break;
3947 		case IB_WC_REG_MR:
3948 			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3949 			break;
3950 		case IB_WC_RDMA_READ:
3951 		case IB_WC_SEND:
3952 			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3953 			break;
3954 		default:
3955 			break;
3956 		}
3957 
3958 		num_entries--;
3959 		wc++;
3960 		cnt++;
3961 next_cqe:
3962 		while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3963 			qed_chain_consume(&qp->sq.pbl);
3964 		qedr_inc_sw_cons(&qp->sq);
3965 	}
3966 
3967 	return cnt;
3968 }
3969 
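/* Handle a requester CQE. On success or flush, all WQEs up to sq_cons
 * complete with the corresponding status. For other errors the QP is
 * moved to the ERR state, the WQEs preceding the failed one complete
 * successfully and the failing WQE gets a WC with the translated IB
 * error status.
 */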
3970 static int qedr_poll_cq_req(struct qedr_dev *dev,
3971 			    struct qedr_qp *qp, struct qedr_cq *cq,
3972 			    int num_entries, struct ib_wc *wc,
3973 			    struct rdma_cqe_requester *req)
3974 {
3975 	int cnt = 0;
3976 
3977 	switch (req->status) {
3978 	case RDMA_CQE_REQ_STS_OK:
3979 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3980 				  IB_WC_SUCCESS, 0);
3981 		break;
3982 	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
3983 		if (qp->state != QED_ROCE_QP_STATE_ERR)
3984 			DP_DEBUG(dev, QEDR_MSG_CQ,
3985 				 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3986 				 cq->icid, qp->icid);
3987 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3988 				  IB_WC_WR_FLUSH_ERR, 1);
3989 		break;
3990 	default:
3991 		/* process all WQEs before the consumer */
3992 		qp->state = QED_ROCE_QP_STATE_ERR;
3993 		cnt = process_req(dev, qp, cq, num_entries, wc,
3994 				  req->sq_cons - 1, IB_WC_SUCCESS, 0);
3995 		wc += cnt;
3996 		/* if we have extra WC fill it with actual error info */
3997 		if (cnt < num_entries) {
3998 			enum ib_wc_status wc_status;
3999 
4000 			switch (req->status) {
4001 			case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
4002 				DP_ERR(dev,
4003 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4004 				       cq->icid, qp->icid);
4005 				wc_status = IB_WC_BAD_RESP_ERR;
4006 				break;
4007 			case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
4008 				DP_ERR(dev,
4009 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4010 				       cq->icid, qp->icid);
4011 				wc_status = IB_WC_LOC_LEN_ERR;
4012 				break;
4013 			case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
4014 				DP_ERR(dev,
4015 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4016 				       cq->icid, qp->icid);
4017 				wc_status = IB_WC_LOC_QP_OP_ERR;
4018 				break;
4019 			case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
4020 				DP_ERR(dev,
4021 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4022 				       cq->icid, qp->icid);
4023 				wc_status = IB_WC_LOC_PROT_ERR;
4024 				break;
4025 			case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
4026 				DP_ERR(dev,
4027 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4028 				       cq->icid, qp->icid);
4029 				wc_status = IB_WC_MW_BIND_ERR;
4030 				break;
4031 			case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
4032 				DP_ERR(dev,
4033 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4034 				       cq->icid, qp->icid);
4035 				wc_status = IB_WC_REM_INV_REQ_ERR;
4036 				break;
4037 			case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
4038 				DP_ERR(dev,
4039 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4040 				       cq->icid, qp->icid);
4041 				wc_status = IB_WC_REM_ACCESS_ERR;
4042 				break;
4043 			case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
4044 				DP_ERR(dev,
4045 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4046 				       cq->icid, qp->icid);
4047 				wc_status = IB_WC_REM_OP_ERR;
4048 				break;
4049 			case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
4050 				DP_ERR(dev,
4051 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4052 				       cq->icid, qp->icid);
4053 				wc_status = IB_WC_RNR_RETRY_EXC_ERR;
4054 				break;
4055 			case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
4056 				DP_ERR(dev,
4057 				       "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4058 				       cq->icid, qp->icid);
4059 				wc_status = IB_WC_RETRY_EXC_ERR;
4060 				break;
4061 			default:
4062 				DP_ERR(dev,
4063 				       "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4064 				       cq->icid, qp->icid);
4065 				wc_status = IB_WC_GENERAL_ERR;
4066 			}
4067 			cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
4068 					   wc_status, 1);
4069 		}
4070 	}
4071 
4072 	return cnt;
4073 }
4074 
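/* Translate a firmware responder CQE status into the corresponding IB
 * work completion status.
 */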
4075 static inline int qedr_cqe_resp_status_to_ib(u8 status)
4076 {
4077 	switch (status) {
4078 	case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
4079 		return IB_WC_LOC_ACCESS_ERR;
4080 	case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
4081 		return IB_WC_LOC_LEN_ERR;
4082 	case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
4083 		return IB_WC_LOC_QP_OP_ERR;
4084 	case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
4085 		return IB_WC_LOC_PROT_ERR;
4086 	case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
4087 		return IB_WC_MW_BIND_ERR;
4088 	case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
4089 		return IB_WC_REM_INV_RD_REQ_ERR;
4090 	case RDMA_CQE_RESP_STS_OK:
4091 		return IB_WC_SUCCESS;
4092 	default:
4093 		return IB_WC_GENERAL_ERR;
4094 	}
4095 }
4096 
4097 static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
4098 					  struct ib_wc *wc)
4099 {
4100 	wc->status = IB_WC_SUCCESS;
4101 	wc->byte_len = le32_to_cpu(resp->length);
4102 
4103 	if (resp->flags & QEDR_RESP_IMM) {
4104 		wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
4105 		wc->wc_flags |= IB_WC_WITH_IMM;
4106 
4107 		if (resp->flags & QEDR_RESP_RDMA)
4108 			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
4109 
4110 		if (resp->flags & QEDR_RESP_INV)
4111 			return -EINVAL;
4112 
4113 	} else if (resp->flags & QEDR_RESP_INV) {
4114 		wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
4115 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4116 
4117 		if (resp->flags & QEDR_RESP_RDMA)
4118 			return -EINVAL;
4119 
4120 	} else if (resp->flags & QEDR_RESP_RDMA) {
4121 		return -EINVAL;
4122 	}
4123 
4124 	return 0;
4125 }
4126 
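/* Fill a work completion for a single responder CQE: decode the OK-path
 * flags (immediate data, invalidate, RDMA with immediate) or translate
 * the error status, then fill the common WC fields.
 */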
4127 static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4128 			       struct qedr_cq *cq, struct ib_wc *wc,
4129 			       struct rdma_cqe_responder *resp, u64 wr_id)
4130 {
4131 	/* Must fill fields before qedr_set_ok_cqe_resp_wc() */
4132 	wc->opcode = IB_WC_RECV;
4133 	wc->wc_flags = 0;
4134 
4135 	if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
4136 		if (qedr_set_ok_cqe_resp_wc(resp, wc))
4137 			DP_ERR(dev,
4138 			       "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
4139 			       cq, cq->icid, resp->flags);
4140 
4141 	} else {
4142 		wc->status = qedr_cqe_resp_status_to_ib(resp->status);
4143 		if (wc->status == IB_WC_GENERAL_ERR)
4144 			DP_ERR(dev,
4145 			       "CQ %p (icid=%d) contains an invalid CQE status %d\n",
4146 			       cq, cq->icid, resp->status);
4147 	}
4148 
4149 	/* Fill the rest of the WC */
4150 	wc->vendor_err = 0;
4151 	wc->src_qp = qp->id;
4152 	wc->qp = &qp->ibqp;
4153 	wc->wr_id = wr_id;
4154 }
4155 
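/* Fill a work completion for a responder CQE received on an SRQ. Flushed
 * WRs are reported with IB_WC_WR_FLUSH_ERR; the SRQ consumer count is
 * advanced either way.
 */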
4156 static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4157 				struct qedr_cq *cq, struct ib_wc *wc,
4158 				struct rdma_cqe_responder *resp)
4159 {
4160 	struct qedr_srq *srq = qp->srq;
4161 	u64 wr_id;
4162 
4163 	wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
4164 			 le32_to_cpu(resp->srq_wr_id.lo), u64);
4165 
4166 	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4167 		wc->status = IB_WC_WR_FLUSH_ERR;
4168 		wc->vendor_err = 0;
4169 		wc->wr_id = wr_id;
4170 		wc->byte_len = 0;
4171 		wc->src_qp = qp->id;
4172 		wc->qp = &qp->ibqp;
4174 	} else {
4175 		__process_resp_one(dev, qp, cq, wc, resp, wr_id);
4176 	}
4177 	srq->hw_srq.wr_cons_cnt++;
4178 
4179 	return 1;
4180 }
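
/* Fill a work completion for a responder CQE on a regular RQ and release
 * the RQ chain elements consumed by the corresponding receive WR.
 */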
4181 static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4182 			    struct qedr_cq *cq, struct ib_wc *wc,
4183 			    struct rdma_cqe_responder *resp)
4184 {
4185 	u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4186 
4187 	__process_resp_one(dev, qp, cq, wc, resp, wr_id);
4188 
4189 	while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4190 		qed_chain_consume(&qp->rq.pbl);
4191 	qedr_inc_sw_cons(&qp->rq);
4192 
4193 	return 1;
4194 }
4195 
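/* Flush posted receives up to hw_cons, generating an IB_WC_WR_FLUSH_ERR
 * completion for each of them.
 */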
4196 static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
4197 			      int num_entries, struct ib_wc *wc, u16 hw_cons)
4198 {
4199 	u16 cnt = 0;
4200 
4201 	while (num_entries && qp->rq.wqe_cons != hw_cons) {
4202 		/* fill WC */
4203 		wc->status = IB_WC_WR_FLUSH_ERR;
4204 		wc->vendor_err = 0;
4205 		wc->wc_flags = 0;
4206 		wc->src_qp = qp->id;
4207 		wc->byte_len = 0;
4208 		wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4209 		wc->qp = &qp->ibqp;
4210 		num_entries--;
4211 		wc++;
4212 		cnt++;
4213 		while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4214 			qed_chain_consume(&qp->rq.pbl);
4215 		qedr_inc_sw_cons(&qp->rq);
4216 	}
4217 
4218 	return cnt;
4219 }
4220 
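/* Consume the responder CQE only once the RQ consumer has caught up with
 * the consumer value reported in the CQE.
 */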
4221 static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4222 				 struct rdma_cqe_responder *resp, int *update)
4223 {
4224 	if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
4225 		consume_cqe(cq);
4226 		*update |= 1;
4227 	}
4228 }
4229 
4230 static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4231 				 struct qedr_cq *cq, int num_entries,
4232 				 struct ib_wc *wc,
4233 				 struct rdma_cqe_responder *resp)
4234 {
4235 	int cnt;
4236 
4237 	cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
4238 	consume_cqe(cq);
4239 
4240 	return cnt;
4241 }
4242 
4243 static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
4244 			     struct qedr_cq *cq, int num_entries,
4245 			     struct ib_wc *wc, struct rdma_cqe_responder *resp,
4246 			     int *update)
4247 {
4248 	int cnt;
4249 
4250 	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4251 		cnt = process_resp_flush(qp, cq, num_entries, wc,
4252 					 resp->rq_cons_or_srq_id);
4253 		try_consume_resp_cqe(cq, qp, resp, update);
4254 	} else {
4255 		cnt = process_resp_one(dev, qp, cq, wc, resp);
4256 		consume_cqe(cq);
4257 		*update |= 1;
4258 	}
4259 
4260 	return cnt;
4261 }
4262 
4263 static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4264 				struct rdma_cqe_requester *req, int *update)
4265 {
4266 	if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
4267 		consume_cqe(cq);
4268 		*update |= 1;
4269 	}
4270 }
4271 
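/* Poll up to num_entries completions from the CQ. Valid CQEs are
 * dispatched by type (requester, responder RQ, responder SRQ), the
 * software consumer index is advanced by the number of hardware elements
 * consumed, and the CQ doorbell is updated if any CQE was consumed.
 * GSI CQs are handled by the GSI path and destroyed CQs are ignored.
 */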
4272 int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
4273 {
4274 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
4275 	struct qedr_cq *cq = get_qedr_cq(ibcq);
4276 	union rdma_cqe *cqe;
4277 	u32 old_cons, new_cons;
4278 	unsigned long flags;
4279 	int update = 0;
4280 	int done = 0;
4281 
4282 	if (cq->destroyed) {
4283 		DP_ERR(dev,
4284 		       "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
4285 		       cq, cq->icid);
4286 		return 0;
4287 	}
4288 
4289 	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
4290 		return qedr_gsi_poll_cq(ibcq, num_entries, wc);
4291 
4292 	spin_lock_irqsave(&cq->cq_lock, flags);
4293 	cqe = cq->latest_cqe;
4294 	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4295 	while (num_entries && is_valid_cqe(cq, cqe)) {
4296 		struct qedr_qp *qp;
4297 		int cnt = 0;
4298 
4299 		/* prevent speculative reads of any field of CQE */
4300 		rmb();
4301 
4302 		qp = cqe_get_qp(cqe);
4303 		if (!qp) {
4304 			WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
4305 			break;
4306 		}
4307 
4308 		wc->qp = &qp->ibqp;
4309 
4310 		switch (cqe_get_type(cqe)) {
4311 		case RDMA_CQE_TYPE_REQUESTER:
4312 			cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
4313 					       &cqe->req);
4314 			try_consume_req_cqe(cq, qp, &cqe->req, &update);
4315 			break;
4316 		case RDMA_CQE_TYPE_RESPONDER_RQ:
4317 			cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
4318 						&cqe->resp, &update);
4319 			break;
4320 		case RDMA_CQE_TYPE_RESPONDER_SRQ:
4321 			cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
4322 						    wc, &cqe->resp);
4323 			update = 1;
4324 			break;
4325 		case RDMA_CQE_TYPE_INVALID:
4326 		default:
4327 			DP_ERR(dev, "Error: invalid CQE type = %d\n",
4328 			       cqe_get_type(cqe));
4329 		}
4330 		num_entries -= cnt;
4331 		wc += cnt;
4332 		done += cnt;
4333 
4334 		cqe = get_cqe(cq);
4335 	}
4336 	new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4337 
4338 	cq->cq_cons += new_cons - old_cons;
4339 
4340 	if (update)
4341 		/* The doorbell notifies about the latest VALID entry,
4342 		 * but the chain already points to the next INVALID one
4343 		 */
4344 		doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
4345 
4346 	spin_unlock_irqrestore(&cq->cq_lock, flags);
4347 	return done;
4348 }
4349 
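/* qedr performs no MAD processing; simply report success to the MAD
 * layer.
 */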
4350 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
4351 		     u8 port_num, const struct ib_wc *in_wc,
4352 		     const struct ib_grh *in_grh, const struct ib_mad *in,
4353 		     struct ib_mad *out_mad, size_t *out_mad_size,
4354 		     u16 *out_mad_pkey_index)
4355 {
4356 	return IB_MAD_RESULT_SUCCESS;
4357 }
4358