1 /*******************************************************************
2  * This file is part of the Emulex RoCE Device Driver for          *
3  * RoCE (RDMA over Converged Ethernet) adapters.                   *
4  * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  *                                                                 *
8  * This program is free software; you can redistribute it and/or   *
9  * modify it under the terms of version 2 of the GNU General       *
10  * Public License as published by the Free Software Foundation.    *
11  * This program is distributed in the hope that it will be useful. *
12  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
13  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
14  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
15  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
17  * more details, a copy of which can be found in the file COPYING  *
18  * included with this package.                                     *
19  *
20  * Contact Information:
21  * linux-drivers@emulex.com
22  *
23  * Emulex
24  * 3333 Susan Street
25  * Costa Mesa, CA 92626
26  *******************************************************************/
27 
28 #include <linux/dma-mapping.h>
29 #include <rdma/ib_verbs.h>
30 #include <rdma/ib_user_verbs.h>
31 #include <rdma/iw_cm.h>
32 #include <rdma/ib_umem.h>
33 #include <rdma/ib_addr.h>
34 
35 #include "ocrdma.h"
36 #include "ocrdma_hw.h"
37 #include "ocrdma_verbs.h"
38 #include "ocrdma_abi.h"
39 
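/* RoCE exposes only the default partition key; report 0xffff for any
 * valid pkey index.
 */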
40 int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
41 {
42 	if (index > 1)
43 		return -EINVAL;
44 
45 	*pkey = 0xffff;
46 	return 0;
47 }
48 
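/* Return the SGID at the given index from the driver-maintained SGID
 * table.
 */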
49 int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
50 		     int index, union ib_gid *sgid)
51 {
52 	struct ocrdma_dev *dev;
53 
54 	dev = get_ocrdma_dev(ibdev);
55 	memset(sgid, 0, sizeof(*sgid));
56 	if (index >= OCRDMA_MAX_SGID)
57 		return -EINVAL;
58 
59 	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
60 
61 	return 0;
62 }
63 
64 int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
65 {
66 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
67 
68 	memset(attr, 0, sizeof(*attr));
69 	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
70 	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
71 	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
72 	attr->max_mr_size = ~0ull;
73 	attr->page_size_cap = 0xffff000;
74 	attr->vendor_id = dev->nic_info.pdev->vendor;
75 	attr->vendor_part_id = dev->nic_info.pdev->device;
76 	attr->hw_ver = 0;
77 	attr->max_qp = dev->attr.max_qp;
78 	attr->max_ah = OCRDMA_MAX_AH;
79 	attr->max_qp_wr = dev->attr.max_wqe;
80 
81 	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
82 					IB_DEVICE_RC_RNR_NAK_GEN |
83 					IB_DEVICE_SHUTDOWN_PORT |
84 					IB_DEVICE_SYS_IMAGE_GUID |
85 					IB_DEVICE_LOCAL_DMA_LKEY |
86 					IB_DEVICE_MEM_MGT_EXTENSIONS;
87 	attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
88 	attr->max_sge_rd = 0;
89 	attr->max_cq = dev->attr.max_cq;
90 	attr->max_cqe = dev->attr.max_cqe;
91 	attr->max_mr = dev->attr.max_mr;
92 	attr->max_mw = 0;
93 	attr->max_pd = dev->attr.max_pd;
94 	attr->atomic_cap = 0;
95 	attr->max_fmr = 0;
96 	attr->max_map_per_fmr = 0;
97 	attr->max_qp_rd_atom =
98 	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
99 	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
100 	attr->max_srq = dev->attr.max_srq;
101 	attr->max_srq_sge = dev->attr.max_srq_sge;
102 	attr->max_srq_wr = dev->attr.max_rqe;
103 	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
104 	attr->max_fast_reg_page_list_len = 0;
105 	attr->max_pkeys = 1;
106 	return 0;
107 }
108 
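/* Map the firmware-reported PHY link speed to an equivalent IB
 * speed/width pair; unknown speeds default to SDR/1X.
 */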
109 static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
110 					    u8 *ib_speed, u8 *ib_width)
111 {
112 	int status;
113 	u8 speed;
114 
115 	status = ocrdma_mbx_get_link_speed(dev, &speed);
116 	if (status)
117 		speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
118 
119 	switch (speed) {
120 	case OCRDMA_PHYS_LINK_SPEED_1GBPS:
121 		*ib_speed = IB_SPEED_SDR;
122 		*ib_width = IB_WIDTH_1X;
123 		break;
124 
125 	case OCRDMA_PHYS_LINK_SPEED_10GBPS:
126 		*ib_speed = IB_SPEED_QDR;
127 		*ib_width = IB_WIDTH_1X;
128 		break;
129 
130 	case OCRDMA_PHYS_LINK_SPEED_20GBPS:
131 		*ib_speed = IB_SPEED_DDR;
132 		*ib_width = IB_WIDTH_4X;
133 		break;
134 
135 	case OCRDMA_PHYS_LINK_SPEED_40GBPS:
136 		*ib_speed = IB_SPEED_QDR;
137 		*ib_width = IB_WIDTH_4X;
138 		break;
139 
140 	default:
141 		/* Unsupported */
142 		*ib_speed = IB_SPEED_SDR;
143 		*ib_width = IB_WIDTH_1X;
144 	}
145 }
146 
147 
148 int ocrdma_query_port(struct ib_device *ibdev,
149 		      u8 port, struct ib_port_attr *props)
150 {
151 	enum ib_port_state port_state;
152 	struct ocrdma_dev *dev;
153 	struct net_device *netdev;
154 
155 	dev = get_ocrdma_dev(ibdev);
156 	if (port > 1) {
157 		pr_err("%s(%d) invalid_port=0x%x\n", __func__,
158 		       dev->id, port);
159 		return -EINVAL;
160 	}
161 	netdev = dev->nic_info.netdev;
162 	if (netif_running(netdev) && netif_oper_up(netdev)) {
163 		port_state = IB_PORT_ACTIVE;
164 		props->phys_state = 5;
165 	} else {
166 		port_state = IB_PORT_DOWN;
167 		props->phys_state = 3;
168 	}
169 	props->max_mtu = IB_MTU_4096;
170 	props->active_mtu = iboe_get_mtu(netdev->mtu);
171 	props->lid = 0;
172 	props->lmc = 0;
173 	props->sm_lid = 0;
174 	props->sm_sl = 0;
175 	props->state = port_state;
176 	props->port_cap_flags =
177 	    IB_PORT_CM_SUP |
178 	    IB_PORT_REINIT_SUP |
179 	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
180 	props->gid_tbl_len = OCRDMA_MAX_SGID;
181 	props->pkey_tbl_len = 1;
182 	props->bad_pkey_cntr = 0;
183 	props->qkey_viol_cntr = 0;
184 	get_link_speed_and_width(dev, &props->active_speed,
185 				 &props->active_width);
186 	props->max_msg_sz = 0x80000000;
187 	props->max_vl_num = 4;
188 	return 0;
189 }
190 
191 int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
192 		       struct ib_port_modify *props)
193 {
194 	struct ocrdma_dev *dev;
195 
196 	dev = get_ocrdma_dev(ibdev);
197 	if (port > 1) {
198 		pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
199 		return -EINVAL;
200 	}
201 	return 0;
202 }
203 
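/* Each ucontext keeps a list of (physical address, length) keys for the
 * regions exported to user space; ocrdma_mmap() validates incoming
 * requests against this list.
 */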
204 static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
205 			   unsigned long len)
206 {
207 	struct ocrdma_mm *mm;
208 
209 	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
210 	if (mm == NULL)
211 		return -ENOMEM;
212 	mm->key.phy_addr = phy_addr;
213 	mm->key.len = len;
214 	INIT_LIST_HEAD(&mm->entry);
215 
216 	mutex_lock(&uctx->mm_list_lock);
217 	list_add_tail(&mm->entry, &uctx->mm_head);
218 	mutex_unlock(&uctx->mm_list_lock);
219 	return 0;
220 }
221 
222 static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
223 			    unsigned long len)
224 {
225 	struct ocrdma_mm *mm, *tmp;
226 
227 	mutex_lock(&uctx->mm_list_lock);
228 	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
229 		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
230 			continue;
231 
232 		list_del(&mm->entry);
233 		kfree(mm);
234 		break;
235 	}
236 	mutex_unlock(&uctx->mm_list_lock);
237 }
238 
239 static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
240 			      unsigned long len)
241 {
242 	bool found = false;
243 	struct ocrdma_mm *mm;
244 
245 	mutex_lock(&uctx->mm_list_lock);
246 	list_for_each_entry(mm, &uctx->mm_head, entry) {
247 		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
248 			continue;
249 
250 		found = true;
251 		break;
252 	}
253 	mutex_unlock(&uctx->mm_list_lock);
254 	return found;
255 }
256 
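/* Allocate a PD through a mailbox command.  For user PDs on GEN2
 * devices, a DPP-enabled PD is tried first and the allocation is
 * retried without DPP if that fails.
 */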
257 static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
258 					  struct ocrdma_ucontext *uctx,
259 					  struct ib_udata *udata)
260 {
261 	struct ocrdma_pd *pd = NULL;
262 	int status = 0;
263 
264 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
265 	if (!pd)
266 		return ERR_PTR(-ENOMEM);
267 
268 	if (udata && uctx) {
269 		pd->dpp_enabled =
270 			dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY;
271 		pd->num_dpp_qp =
272 			pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
273 	}
274 
275 retry:
276 	status = ocrdma_mbx_alloc_pd(dev, pd);
277 	if (status) {
278 		if (pd->dpp_enabled) {
279 			pd->dpp_enabled = false;
280 			pd->num_dpp_qp = 0;
281 			goto retry;
282 		} else {
283 			kfree(pd);
284 			return ERR_PTR(status);
285 		}
286 	}
287 
288 	return pd;
289 }
290 
291 static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
292 				 struct ocrdma_pd *pd)
293 {
294 	return (uctx->cntxt_pd == pd);
295 }
296 
297 static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
298 			      struct ocrdma_pd *pd)
299 {
300 	int status = 0;
301 
302 	status = ocrdma_mbx_dealloc_pd(dev, pd);
303 	kfree(pd);
304 	return status;
305 }
306 
307 static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
308 				    struct ocrdma_ucontext *uctx,
309 				    struct ib_udata *udata)
310 {
311 	int status = 0;
312 
313 	uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
314 	if (IS_ERR(uctx->cntxt_pd)) {
315 		status = PTR_ERR(uctx->cntxt_pd);
316 		uctx->cntxt_pd = NULL;
317 		goto err;
318 	}
319 
320 	uctx->cntxt_pd->uctx = uctx;
321 	uctx->cntxt_pd->ibpd.device = &dev->ibdev;
322 err:
323 	return status;
324 }
325 
326 static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
327 {
328 	int status = 0;
329 	struct ocrdma_pd *pd = uctx->cntxt_pd;
330 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
331 
332 	BUG_ON(uctx->pd_in_use);
333 	uctx->cntxt_pd = NULL;
334 	status = _ocrdma_dealloc_pd(dev, pd);
335 	return status;
336 }
337 
338 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
339 {
340 	struct ocrdma_pd *pd = NULL;
341 
342 	mutex_lock(&uctx->mm_list_lock);
343 	if (!uctx->pd_in_use) {
344 		uctx->pd_in_use = true;
345 		pd = uctx->cntxt_pd;
346 	}
347 	mutex_unlock(&uctx->mm_list_lock);
348 
349 	return pd;
350 }
351 
352 static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
353 {
354 	mutex_lock(&uctx->mm_list_lock);
355 	uctx->pd_in_use = false;
356 	mutex_unlock(&uctx->mm_list_lock);
357 }
358 
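/* Allocate a user context: a DMA-coherent AH table shared with user
 * space, a per-context PD, and a response structure copied back through
 * udata.
 */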
359 struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
360 					  struct ib_udata *udata)
361 {
362 	int status;
363 	struct ocrdma_ucontext *ctx;
364 	struct ocrdma_alloc_ucontext_resp resp;
365 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
366 	struct pci_dev *pdev = dev->nic_info.pdev;
367 	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
368 
369 	if (!udata)
370 		return ERR_PTR(-EFAULT);
371 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
372 	if (!ctx)
373 		return ERR_PTR(-ENOMEM);
374 	INIT_LIST_HEAD(&ctx->mm_head);
375 	mutex_init(&ctx->mm_list_lock);
376 
377 	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
378 					    &ctx->ah_tbl.pa, GFP_KERNEL);
379 	if (!ctx->ah_tbl.va) {
380 		kfree(ctx);
381 		return ERR_PTR(-ENOMEM);
382 	}
383 	memset(ctx->ah_tbl.va, 0, map_len);
384 	ctx->ah_tbl.len = map_len;
385 
386 	memset(&resp, 0, sizeof(resp));
387 	resp.ah_tbl_len = ctx->ah_tbl.len;
388 	resp.ah_tbl_page = ctx->ah_tbl.pa;
389 
390 	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
391 	if (status)
392 		goto map_err;
393 
394 	status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
395 	if (status)
396 		goto pd_err;
397 
398 	resp.dev_id = dev->id;
399 	resp.max_inline_data = dev->attr.max_inline_data;
400 	resp.wqe_size = dev->attr.wqe_size;
401 	resp.rqe_size = dev->attr.rqe_size;
402 	resp.dpp_wqe_size = dev->attr.wqe_size;
403 
404 	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
405 	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
406 	if (status)
407 		goto cpy_err;
408 	return &ctx->ibucontext;
409 
410 cpy_err:
411 pd_err:
412 	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
413 map_err:
414 	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
415 			  ctx->ah_tbl.pa);
416 	kfree(ctx);
417 	return ERR_PTR(status);
418 }
419 
420 int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
421 {
422 	int status = 0;
423 	struct ocrdma_mm *mm, *tmp;
424 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
425 	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
426 	struct pci_dev *pdev = dev->nic_info.pdev;
427 
428 	status = ocrdma_dealloc_ucontext_pd(uctx);
429 
430 	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
431 	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
432 			  uctx->ah_tbl.pa);
433 
434 	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
435 		list_del(&mm->entry);
436 		kfree(mm);
437 	}
438 	kfree(uctx);
439 	return status;
440 }
441 
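/* mmap handler for user space: doorbell pages are mapped uncached, DPP
 * regions are mapped write-combined, and queue memory is mapped with
 * default attributes.  The requested offset/length must match an entry
 * previously recorded with ocrdma_add_mmap().
 */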
442 int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
443 {
444 	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
445 	struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
446 	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
447 	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
448 	unsigned long len = (vma->vm_end - vma->vm_start);
449 	int status = 0;
450 	bool found;
451 
452 	if (vma->vm_start & (PAGE_SIZE - 1))
453 		return -EINVAL;
454 	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
455 	if (!found)
456 		return -EINVAL;
457 
458 	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
459 		dev->nic_info.db_total_size)) &&
460 		(len <=	dev->nic_info.db_page_size)) {
461 		if (vma->vm_flags & VM_READ)
462 			return -EPERM;
463 
464 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
465 		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
466 					    len, vma->vm_page_prot);
467 	} else if (dev->nic_info.dpp_unmapped_len &&
468 		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
469 		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
470 			dev->nic_info.dpp_unmapped_len)) &&
471 		(len <= dev->nic_info.dpp_unmapped_len)) {
472 		if (vma->vm_flags & VM_READ)
473 			return -EPERM;
474 
475 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
476 		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
477 					    len, vma->vm_page_prot);
478 	} else {
479 		status = remap_pfn_range(vma, vma->vm_start,
480 					 vma->vm_pgoff, len, vma->vm_page_prot);
481 	}
482 	return status;
483 }
484 
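/* Report the PD id plus doorbell and (optional) DPP page addresses to
 * user space and register those pages for mmap.
 */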
485 static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
486 				struct ib_ucontext *ib_ctx,
487 				struct ib_udata *udata)
488 {
489 	int status;
490 	u64 db_page_addr;
491 	u64 dpp_page_addr = 0;
492 	u32 db_page_size;
493 	struct ocrdma_alloc_pd_uresp rsp;
494 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
495 
496 	memset(&rsp, 0, sizeof(rsp));
497 	rsp.id = pd->id;
498 	rsp.dpp_enabled = pd->dpp_enabled;
499 	db_page_addr = ocrdma_get_db_addr(dev, pd->id);
500 	db_page_size = dev->nic_info.db_page_size;
501 
502 	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
503 	if (status)
504 		return status;
505 
506 	if (pd->dpp_enabled) {
507 		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
508 				(pd->id * PAGE_SIZE);
509 		status = ocrdma_add_mmap(uctx, dpp_page_addr,
510 				 PAGE_SIZE);
511 		if (status)
512 			goto dpp_map_err;
513 		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
514 		rsp.dpp_page_addr_lo = dpp_page_addr;
515 	}
516 
517 	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
518 	if (status)
519 		goto ucopy_err;
520 
521 	pd->uctx = uctx;
522 	return 0;
523 
524 ucopy_err:
525 	if (pd->dpp_enabled)
526 		ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
527 dpp_map_err:
528 	ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
529 	return status;
530 }
531 
532 struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
533 			      struct ib_ucontext *context,
534 			      struct ib_udata *udata)
535 {
536 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
537 	struct ocrdma_pd *pd;
538 	struct ocrdma_ucontext *uctx = NULL;
539 	int status;
540 	u8 is_uctx_pd = false;
541 
542 	if (udata && context) {
543 		uctx = get_ocrdma_ucontext(context);
544 		pd = ocrdma_get_ucontext_pd(uctx);
545 		if (pd) {
546 			is_uctx_pd = true;
547 			goto pd_mapping;
548 		}
549 	}
550 
551 	pd = _ocrdma_alloc_pd(dev, uctx, udata);
552 	if (IS_ERR(pd)) {
553 		status = PTR_ERR(pd);
554 		goto exit;
555 	}
556 
557 pd_mapping:
558 	if (udata && context) {
559 		status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
560 		if (status)
561 			goto err;
562 	}
563 	return &pd->ibpd;
564 
565 err:
566 	if (is_uctx_pd) {
567 		ocrdma_release_ucontext_pd(uctx);
568 	} else {
569 		status = ocrdma_mbx_dealloc_pd(dev, pd);
570 		kfree(pd);
571 	}
572 exit:
573 	return ERR_PTR(status);
574 }
575 
576 int ocrdma_dealloc_pd(struct ib_pd *ibpd)
577 {
578 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
579 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
580 	struct ocrdma_ucontext *uctx = NULL;
581 	int status = 0;
582 	u64 usr_db;
583 
584 	uctx = pd->uctx;
585 	if (uctx) {
586 		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
587 			(pd->id * PAGE_SIZE);
588 		if (pd->dpp_enabled)
589 			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
590 		usr_db = ocrdma_get_db_addr(dev, pd->id);
591 		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
592 
593 		if (is_ucontext_pd(uctx, pd)) {
594 			ocrdma_release_ucontext_pd(uctx);
595 			return status;
596 		}
597 	}
598 	status = _ocrdma_dealloc_pd(dev, pd);
599 	return status;
600 }
601 
602 static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
603 			    u32 pdid, int acc, u32 num_pbls, u32 addr_check)
604 {
605 	int status;
606 
607 	mr->hwmr.fr_mr = 0;
608 	mr->hwmr.local_rd = 1;
609 	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
610 	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
611 	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
612 	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
613 	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
614 	mr->hwmr.num_pbls = num_pbls;
615 
616 	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
617 	if (status)
618 		return status;
619 
620 	mr->ibmr.lkey = mr->hwmr.lkey;
621 	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
622 		mr->ibmr.rkey = mr->hwmr.lkey;
623 	return 0;
624 }
625 
626 struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
627 {
628 	int status;
629 	struct ocrdma_mr *mr;
630 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
631 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
632 
633 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
634 		pr_err("%s err, invalid access rights\n", __func__);
635 		return ERR_PTR(-EINVAL);
636 	}
637 
638 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
639 	if (!mr)
640 		return ERR_PTR(-ENOMEM);
641 
642 	status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
643 				   OCRDMA_ADDR_CHECK_DISABLE);
644 	if (status) {
645 		kfree(mr);
646 		return ERR_PTR(status);
647 	}
648 
649 	return &mr->ibmr;
650 }
651 
652 static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
653 				   struct ocrdma_hw_mr *mr)
654 {
655 	struct pci_dev *pdev = dev->nic_info.pdev;
656 	int i = 0;
657 
658 	if (mr->pbl_table) {
659 		for (i = 0; i < mr->num_pbls; i++) {
660 			if (!mr->pbl_table[i].va)
661 				continue;
662 			dma_free_coherent(&pdev->dev, mr->pbl_size,
663 					  mr->pbl_table[i].va,
664 					  mr->pbl_table[i].pa);
665 		}
666 		kfree(mr->pbl_table);
667 		mr->pbl_table = NULL;
668 	}
669 }
670 
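/* Pick the smallest PBL size (a power-of-two multiple of
 * OCRDMA_MIN_HPAGE_SIZE) that lets num_pbes PBEs fit within the device
 * limit on PBLs per MR.
 */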
671 static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
672 			      u32 num_pbes)
673 {
674 	u32 num_pbls = 0;
675 	u32 idx = 0;
676 	int status = 0;
677 	u32 pbl_size;
678 
679 	do {
680 		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
681 		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
682 			status = -EFAULT;
683 			break;
684 		}
685 		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
686 		num_pbls = num_pbls / (pbl_size / sizeof(u64));
687 		idx++;
688 	} while (num_pbls >= dev->attr.max_num_mr_pbl);
689 
690 	mr->hwmr.num_pbes = num_pbes;
691 	mr->hwmr.num_pbls = num_pbls;
692 	mr->hwmr.pbl_size = pbl_size;
693 	return status;
694 }
695 
696 static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
697 {
698 	int status = 0;
699 	int i;
700 	u32 dma_len = mr->pbl_size;
701 	struct pci_dev *pdev = dev->nic_info.pdev;
702 	void *va;
703 	dma_addr_t pa;
704 
705 	mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
706 				GFP_KERNEL);
707 
708 	if (!mr->pbl_table)
709 		return -ENOMEM;
710 
711 	for (i = 0; i < mr->num_pbls; i++) {
712 		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
713 		if (!va) {
714 			ocrdma_free_mr_pbl_tbl(dev, mr);
715 			status = -ENOMEM;
716 			break;
717 		}
718 		memset(va, 0, dma_len);
719 		mr->pbl_table[i].va = va;
720 		mr->pbl_table[i].pa = pa;
721 	}
722 	return status;
723 }
724 
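/* Walk the umem chunk list and fill the PBL tables with the DMA address
 * of every page backing the user MR.
 */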
725 static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
726 			    u32 num_pbes)
727 {
728 	struct ocrdma_pbe *pbe;
729 	struct ib_umem_chunk *chunk;
730 	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
731 	struct ib_umem *umem = mr->umem;
732 	int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
733 
734 	if (!mr->hwmr.num_pbes)
735 		return;
736 
737 	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
738 	pbe_cnt = 0;
739 
740 	shift = ilog2(umem->page_size);
741 
742 	list_for_each_entry(chunk, &umem->chunk_list, list) {
743 		/* get all the dma regions from the chunk. */
744 		for (i = 0; i < chunk->nmap; i++) {
745 			pages = sg_dma_len(&chunk->page_list[i]) >> shift;
746 			for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
747 				/* store the page address in pbe */
748 				pbe->pa_lo =
749 				    cpu_to_le32(sg_dma_address
750 						(&chunk->page_list[i]) +
751 						(umem->page_size * pg_cnt));
752 				pbe->pa_hi =
753 				    cpu_to_le32(upper_32_bits
754 						((sg_dma_address
755 						  (&chunk->page_list[i]) +
756 						  umem->page_size * pg_cnt)));
757 				pbe_cnt += 1;
758 				total_num_pbes += 1;
759 				pbe++;
760 
761 				/* once all pbes are built, return; the caller issues the mbx cmd. */
762 				if (total_num_pbes == num_pbes)
763 					return;
764 
765 				/* if the given pbl is full of pbes,
766 				 * move to the next pbl.
767 				 */
768 				if (pbe_cnt ==
769 					(mr->hwmr.pbl_size / sizeof(u64))) {
770 					pbl_tbl++;
771 					pbe = (struct ocrdma_pbe *)pbl_tbl->va;
772 					pbe_cnt = 0;
773 				}
774 			}
775 		}
776 	}
777 }
778 
779 struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
780 				 u64 usr_addr, int acc, struct ib_udata *udata)
781 {
782 	int status = -ENOMEM;
783 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
784 	struct ocrdma_mr *mr;
785 	struct ocrdma_pd *pd;
786 	u32 num_pbes;
787 
788 	pd = get_ocrdma_pd(ibpd);
789 
790 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
791 		return ERR_PTR(-EINVAL);
792 
793 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
794 	if (!mr)
795 		return ERR_PTR(status);
796 	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
797 	if (IS_ERR(mr->umem)) {
798 		status = -EFAULT;
799 		goto umem_err;
800 	}
801 	num_pbes = ib_umem_page_count(mr->umem);
802 	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
803 	if (status)
804 		goto umem_err;
805 
806 	mr->hwmr.pbe_size = mr->umem->page_size;
807 	mr->hwmr.fbo = mr->umem->offset;
808 	mr->hwmr.va = usr_addr;
809 	mr->hwmr.len = len;
810 	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
811 	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
812 	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
813 	mr->hwmr.local_rd = 1;
814 	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
815 	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
816 	if (status)
817 		goto umem_err;
818 	build_user_pbes(dev, mr, num_pbes);
819 	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
820 	if (status)
821 		goto mbx_err;
822 	mr->ibmr.lkey = mr->hwmr.lkey;
823 	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
824 		mr->ibmr.rkey = mr->hwmr.lkey;
825 
826 	return &mr->ibmr;
827 
828 mbx_err:
829 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
830 umem_err:
831 	kfree(mr);
832 	return ERR_PTR(status);
833 }
834 
835 int ocrdma_dereg_mr(struct ib_mr *ib_mr)
836 {
837 	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
838 	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
839 	int status;
840 
841 	status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
842 
843 	if (mr->hwmr.fr_mr == 0)
844 		ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
845 
846 	/* it could be user registered memory. */
847 	if (mr->umem)
848 		ib_umem_release(mr->umem);
849 	kfree(mr);
850 	return status;
851 }
852 
853 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
854 				struct ib_udata *udata,
855 				struct ib_ucontext *ib_ctx)
856 {
857 	int status;
858 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
859 	struct ocrdma_create_cq_uresp uresp;
860 
861 	memset(&uresp, 0, sizeof(uresp));
862 	uresp.cq_id = cq->id;
863 	uresp.page_size = PAGE_ALIGN(cq->len);
864 	uresp.num_pages = 1;
865 	uresp.max_hw_cqe = cq->max_hw_cqe;
866 	uresp.page_addr[0] = cq->pa;
867 	uresp.db_page_addr =  ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
868 	uresp.db_page_size = dev->nic_info.db_page_size;
869 	uresp.phase_change = cq->phase_change ? 1 : 0;
870 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
871 	if (status) {
872 		pr_err("%s(%d) copy error cqid=0x%x.\n",
873 		       __func__, dev->id, cq->id);
874 		goto err;
875 	}
876 	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
877 	if (status)
878 		goto err;
879 	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
880 	if (status) {
881 		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
882 		goto err;
883 	}
884 	cq->ucontext = uctx;
885 err:
886 	return status;
887 }
888 
889 struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
890 			       struct ib_ucontext *ib_ctx,
891 			       struct ib_udata *udata)
892 {
893 	struct ocrdma_cq *cq;
894 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
895 	struct ocrdma_ucontext *uctx = NULL;
896 	u16 pd_id = 0;
897 	int status;
898 	struct ocrdma_create_cq_ureq ureq;
899 
900 	if (udata) {
901 		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
902 			return ERR_PTR(-EFAULT);
903 	} else
904 		ureq.dpp_cq = 0;
905 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
906 	if (!cq)
907 		return ERR_PTR(-ENOMEM);
908 
909 	spin_lock_init(&cq->cq_lock);
910 	spin_lock_init(&cq->comp_handler_lock);
911 	INIT_LIST_HEAD(&cq->sq_head);
912 	INIT_LIST_HEAD(&cq->rq_head);
913 
914 	if (ib_ctx) {
915 		uctx = get_ocrdma_ucontext(ib_ctx);
916 		pd_id = uctx->cntxt_pd->id;
917 	}
918 
919 	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
920 	if (status) {
921 		kfree(cq);
922 		return ERR_PTR(status);
923 	}
924 	if (ib_ctx) {
925 		status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
926 		if (status)
927 			goto ctx_err;
928 	}
929 	cq->phase = OCRDMA_CQE_VALID;
930 	cq->arm_needed = true;
931 	dev->cq_tbl[cq->id] = cq;
932 
933 	return &cq->ibcq;
934 
935 ctx_err:
936 	ocrdma_mbx_destroy_cq(dev, cq);
937 	kfree(cq);
938 	return ERR_PTR(status);
939 }
940 
941 int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
942 		     struct ib_udata *udata)
943 {
944 	int status = 0;
945 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
946 
947 	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
948 		status = -EINVAL;
949 		return status;
950 	}
951 	ibcq->cqe = new_cnt;
952 	return status;
953 }
954 
955 int ocrdma_destroy_cq(struct ib_cq *ibcq)
956 {
957 	int status;
958 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
959 	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
960 	int pdid = 0;
961 
962 	status = ocrdma_mbx_destroy_cq(dev, cq);
963 
964 	if (cq->ucontext) {
965 		pdid = cq->ucontext->cntxt_pd->id;
966 		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
967 				PAGE_ALIGN(cq->len));
968 		ocrdma_del_mmap(cq->ucontext,
969 				ocrdma_get_db_addr(dev, pdid),
970 				dev->nic_info.db_page_size);
971 	}
972 	dev->cq_tbl[cq->id] = NULL;
973 
974 	kfree(cq);
975 	return status;
976 }
977 
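/* Maintain the device's QP id -> QP lookup table. */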
978 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
979 {
980 	int status = -EINVAL;
981 
982 	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
983 		dev->qp_tbl[qp->id] = qp;
984 		status = 0;
985 	}
986 	return status;
987 }
988 
989 static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
990 {
991 	dev->qp_tbl[qp->id] = NULL;
992 }
993 
994 static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
995 				  struct ib_qp_init_attr *attrs)
996 {
997 	if ((attrs->qp_type != IB_QPT_GSI) &&
998 	    (attrs->qp_type != IB_QPT_RC) &&
999 	    (attrs->qp_type != IB_QPT_UC) &&
1000 	    (attrs->qp_type != IB_QPT_UD)) {
1001 		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
1002 		       __func__, dev->id, attrs->qp_type);
1003 		return -EINVAL;
1004 	}
1005 	/* Skip the check for QP1 to support CM size of 128 */
1006 	if ((attrs->qp_type != IB_QPT_GSI) &&
1007 	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
1008 		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
1009 		       __func__, dev->id, attrs->cap.max_send_wr);
1010 		pr_err("%s(%d) supported send_wr=0x%x\n",
1011 		       __func__, dev->id, dev->attr.max_wqe);
1012 		return -EINVAL;
1013 	}
1014 	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
1015 		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
1016 		       __func__, dev->id, attrs->cap.max_recv_wr);
1017 		pr_err("%s(%d) supported recv_wr=0x%x\n",
1018 		       __func__, dev->id, dev->attr.max_rqe);
1019 		return -EINVAL;
1020 	}
1021 	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
1022 		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
1023 		       __func__, dev->id, attrs->cap.max_inline_data);
1024 		pr_err("%s(%d) supported inline data size=0x%x\n",
1025 		       __func__, dev->id, dev->attr.max_inline_data);
1026 		return -EINVAL;
1027 	}
1028 	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
1029 		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
1030 		       __func__, dev->id, attrs->cap.max_send_sge);
1031 		pr_err("%s(%d) supported send_sge=0x%x\n",
1032 		       __func__, dev->id, dev->attr.max_send_sge);
1033 		return -EINVAL;
1034 	}
1035 	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
1036 		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
1037 		       __func__, dev->id, attrs->cap.max_recv_sge);
1038 		pr_err("%s(%d) supported recv_sge=0x%x\n",
1039 		       __func__, dev->id, dev->attr.max_recv_sge);
1040 		return -EINVAL;
1041 	}
1042 	/* unprivileged user space cannot create special QP */
1043 	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1044 		pr_err
1045 		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
1046 		     __func__, dev->id, attrs->qp_type);
1047 		return -EINVAL;
1048 	}
1049 	/* allow creating only one GSI type of QP */
1050 	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
1051 		pr_err("%s(%d) GSI special QPs already created.\n",
1052 		       __func__, dev->id);
1053 		return -EINVAL;
1054 	}
1055 	/* verify consumer QPs are not trying to use GSI QP's CQ */
1056 	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
1057 		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
1058 			(dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
1059 			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
1060 				__func__, dev->id);
1061 			return -EINVAL;
1062 		}
1063 	}
1064 	return 0;
1065 }
1066 
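/* Copy queue ids, queue page addresses and doorbell parameters to user
 * space and register the SQ/RQ pages for mmap.
 */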
1067 static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
1068 				struct ib_udata *udata, int dpp_offset,
1069 				int dpp_credit_lmt, int srq)
1070 {
1071 	int status = 0;
1072 	u64 usr_db;
1073 	struct ocrdma_create_qp_uresp uresp;
1074 	struct ocrdma_dev *dev = qp->dev;
1075 	struct ocrdma_pd *pd = qp->pd;
1076 
1077 	memset(&uresp, 0, sizeof(uresp));
1078 	usr_db = dev->nic_info.unmapped_db +
1079 			(pd->id * dev->nic_info.db_page_size);
1080 	uresp.qp_id = qp->id;
1081 	uresp.sq_dbid = qp->sq.dbid;
1082 	uresp.num_sq_pages = 1;
1083 	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
1084 	uresp.sq_page_addr[0] = qp->sq.pa;
1085 	uresp.num_wqe_allocated = qp->sq.max_cnt;
1086 	if (!srq) {
1087 		uresp.rq_dbid = qp->rq.dbid;
1088 		uresp.num_rq_pages = 1;
1089 		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
1090 		uresp.rq_page_addr[0] = qp->rq.pa;
1091 		uresp.num_rqe_allocated = qp->rq.max_cnt;
1092 	}
1093 	uresp.db_page_addr = usr_db;
1094 	uresp.db_page_size = dev->nic_info.db_page_size;
1095 	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
1096 		uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
1097 		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1098 		uresp.db_shift = 24;
1099 	} else {
1100 		uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
1101 		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1102 		uresp.db_shift = 16;
1103 	}
1104 
1105 	if (qp->dpp_enabled) {
1106 		uresp.dpp_credit = dpp_credit_lmt;
1107 		uresp.dpp_offset = dpp_offset;
1108 	}
1109 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1110 	if (status) {
1111 		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
1112 		goto err;
1113 	}
1114 	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
1115 				 uresp.sq_page_size);
1116 	if (status)
1117 		goto err;
1118 
1119 	if (!srq) {
1120 		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
1121 					 uresp.rq_page_size);
1122 		if (status)
1123 			goto rq_map_err;
1124 	}
1125 	return status;
1126 rq_map_err:
1127 	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
1128 err:
1129 	return status;
1130 }
1131 
1132 static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
1133 			     struct ocrdma_pd *pd)
1134 {
1135 	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
1136 		qp->sq_db = dev->nic_info.db +
1137 			(pd->id * dev->nic_info.db_page_size) +
1138 			OCRDMA_DB_GEN2_SQ_OFFSET;
1139 		qp->rq_db = dev->nic_info.db +
1140 			(pd->id * dev->nic_info.db_page_size) +
1141 			OCRDMA_DB_GEN2_RQ_OFFSET;
1142 	} else {
1143 		qp->sq_db = dev->nic_info.db +
1144 			(pd->id * dev->nic_info.db_page_size) +
1145 			OCRDMA_DB_SQ_OFFSET;
1146 		qp->rq_db = dev->nic_info.db +
1147 			(pd->id * dev->nic_info.db_page_size) +
1148 			OCRDMA_DB_RQ_OFFSET;
1149 	}
1150 }
1151 
1152 static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
1153 {
1154 	qp->wqe_wr_id_tbl =
1155 	    kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
1156 		    GFP_KERNEL);
1157 	if (qp->wqe_wr_id_tbl == NULL)
1158 		return -ENOMEM;
1159 	qp->rqe_wr_id_tbl =
1160 	    kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
1161 	if (qp->rqe_wr_id_tbl == NULL)
1162 		return -ENOMEM;
1163 
1164 	return 0;
1165 }
1166 
1167 static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
1168 				      struct ocrdma_pd *pd,
1169 				      struct ib_qp_init_attr *attrs)
1170 {
1171 	qp->pd = pd;
1172 	spin_lock_init(&qp->q_lock);
1173 	INIT_LIST_HEAD(&qp->sq_entry);
1174 	INIT_LIST_HEAD(&qp->rq_entry);
1175 
1176 	qp->qp_type = attrs->qp_type;
1177 	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
1178 	qp->max_inline_data = attrs->cap.max_inline_data;
1179 	qp->sq.max_sges = attrs->cap.max_send_sge;
1180 	qp->rq.max_sges = attrs->cap.max_recv_sge;
1181 	qp->state = OCRDMA_QPS_RST;
1182 	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1183 }
1184 
1185 
1186 static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
1187 				   struct ib_qp_init_attr *attrs)
1188 {
1189 	if (attrs->qp_type == IB_QPT_GSI) {
1190 		dev->gsi_qp_created = 1;
1191 		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
1192 		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
1193 	}
1194 }
1195 
1196 struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1197 			       struct ib_qp_init_attr *attrs,
1198 			       struct ib_udata *udata)
1199 {
1200 	int status;
1201 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1202 	struct ocrdma_qp *qp;
1203 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1204 	struct ocrdma_create_qp_ureq ureq;
1205 	u16 dpp_credit_lmt, dpp_offset;
1206 
1207 	status = ocrdma_check_qp_params(ibpd, dev, attrs);
1208 	if (status)
1209 		goto gen_err;
1210 
1211 	memset(&ureq, 0, sizeof(ureq));
1212 	if (udata) {
1213 		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1214 			return ERR_PTR(-EFAULT);
1215 	}
1216 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1217 	if (!qp) {
1218 		status = -ENOMEM;
1219 		goto gen_err;
1220 	}
1221 	qp->dev = dev;
1222 	ocrdma_set_qp_init_params(qp, pd, attrs);
1223 	if (udata == NULL)
1224 		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
1225 					OCRDMA_QP_FAST_REG);
1226 
1227 	mutex_lock(&dev->dev_lock);
1228 	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
1229 					ureq.dpp_cq_id,
1230 					&dpp_offset, &dpp_credit_lmt);
1231 	if (status)
1232 		goto mbx_err;
1233 
1234 	/* user-space QPs' wr_id tables are managed in the library */
1235 	if (udata == NULL) {
1236 		status = ocrdma_alloc_wr_id_tbl(qp);
1237 		if (status)
1238 			goto map_err;
1239 	}
1240 
1241 	status = ocrdma_add_qpn_map(dev, qp);
1242 	if (status)
1243 		goto map_err;
1244 	ocrdma_set_qp_db(dev, qp, pd);
1245 	if (udata) {
1246 		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
1247 					      dpp_credit_lmt,
1248 					      (attrs->srq != NULL));
1249 		if (status)
1250 			goto cpy_err;
1251 	}
1252 	ocrdma_store_gsi_qp_cq(dev, attrs);
1253 	qp->ibqp.qp_num = qp->id;
1254 	mutex_unlock(&dev->dev_lock);
1255 	return &qp->ibqp;
1256 
1257 cpy_err:
1258 	ocrdma_del_qpn_map(dev, qp);
1259 map_err:
1260 	ocrdma_mbx_destroy_qp(dev, qp);
1261 mbx_err:
1262 	mutex_unlock(&dev->dev_lock);
1263 	kfree(qp->wqe_wr_id_tbl);
1264 	kfree(qp->rqe_wr_id_tbl);
1265 	kfree(qp);
1266 	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
1267 gen_err:
1268 	return ERR_PTR(status);
1269 }
1270 
1271 
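/* Write out the cached RQ doorbell count; called from
 * _ocrdma_modify_qp() once the QP moves to RTR.
 */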
1272 static void ocrdma_flush_rq_db(struct ocrdma_qp *qp)
1273 {
1274 	if (qp->db_cache) {
1275 		u32 val = qp->rq.dbid | (qp->db_cache <<
1276 				ocrdma_get_num_posted_shift(qp));
1277 		iowrite32(val, qp->rq_db);
1278 		qp->db_cache = 0;
1279 	}
1280 }
1281 
1282 int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1283 		      int attr_mask)
1284 {
1285 	int status = 0;
1286 	struct ocrdma_qp *qp;
1287 	struct ocrdma_dev *dev;
1288 	enum ib_qp_state old_qps;
1289 
1290 	qp = get_ocrdma_qp(ibqp);
1291 	dev = qp->dev;
1292 	if (attr_mask & IB_QP_STATE)
1293 		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
1294 	/* if the new and previous states are the same, the hw doesn't
1295 	 * need to know about it.
1296 	 */
1297 	if (status < 0)
1298 		return status;
1299 	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
1300 	if (!status && attr_mask & IB_QP_STATE && attr->qp_state == IB_QPS_RTR)
1301 		ocrdma_flush_rq_db(qp);
1302 
1303 	return status;
1304 }
1305 
1306 int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1307 		     int attr_mask, struct ib_udata *udata)
1308 {
1309 	unsigned long flags;
1310 	int status = -EINVAL;
1311 	struct ocrdma_qp *qp;
1312 	struct ocrdma_dev *dev;
1313 	enum ib_qp_state old_qps, new_qps;
1314 
1315 	qp = get_ocrdma_qp(ibqp);
1316 	dev = qp->dev;
1317 
1318 	/* synchronize with multiple contexts trying to change/retrieve qps */
1319 	mutex_lock(&dev->dev_lock);
1320 	/* synchronize with wqe, rqe posting and cqe processing contexts */
1321 	spin_lock_irqsave(&qp->q_lock, flags);
1322 	old_qps = get_ibqp_state(qp->state);
1323 	if (attr_mask & IB_QP_STATE)
1324 		new_qps = attr->qp_state;
1325 	else
1326 		new_qps = old_qps;
1327 	spin_unlock_irqrestore(&qp->q_lock, flags);
1328 
1329 	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
1330 		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
1331 		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1332 		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
1333 		       old_qps, new_qps);
1334 		goto param_err;
1335 	}
1336 
1337 	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
1338 	if (status > 0)
1339 		status = 0;
1340 param_err:
1341 	mutex_unlock(&dev->dev_lock);
1342 	return status;
1343 }
1344 
1345 static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1346 {
1347 	switch (mtu) {
1348 	case 256:
1349 		return IB_MTU_256;
1350 	case 512:
1351 		return IB_MTU_512;
1352 	case 1024:
1353 		return IB_MTU_1024;
1354 	case 2048:
1355 		return IB_MTU_2048;
1356 	case 4096:
1357 		return IB_MTU_4096;
1358 	default:
1359 		return IB_MTU_1024;
1360 	}
1361 }
1362 
1363 static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1364 {
1365 	int ib_qp_acc_flags = 0;
1366 
1367 	if (qp_cap_flags & OCRDMA_QP_INB_WR)
1368 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1369 	if (qp_cap_flags & OCRDMA_QP_INB_RD)
1370 		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1371 	return ib_qp_acc_flags;
1372 }
1373 
1374 int ocrdma_query_qp(struct ib_qp *ibqp,
1375 		    struct ib_qp_attr *qp_attr,
1376 		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1377 {
1378 	int status;
1379 	u32 qp_state;
1380 	struct ocrdma_qp_params params;
1381 	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1382 	struct ocrdma_dev *dev = qp->dev;
1383 
1384 	memset(&params, 0, sizeof(params));
1385 	mutex_lock(&dev->dev_lock);
1386 	status = ocrdma_mbx_query_qp(dev, qp, &params);
1387 	mutex_unlock(&dev->dev_lock);
1388 	if (status)
1389 		goto mbx_err;
1390 	qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
1391 	qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
1392 	qp_attr->path_mtu =
1393 		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1394 				OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1395 				OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
1396 	qp_attr->path_mig_state = IB_MIG_MIGRATED;
1397 	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1398 	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1399 	qp_attr->dest_qp_num =
1400 	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1401 
1402 	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1403 	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1404 	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1405 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
1406 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1407 	qp_attr->cap.max_inline_data = qp->max_inline_data;
1408 	qp_init_attr->cap = qp_attr->cap;
1409 	memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
1410 	       sizeof(params.dgid));
1411 	qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
1412 	    OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
1413 	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
1414 	qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
1415 					  OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1416 						OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
1417 	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
1418 					      OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
1419 						OCRDMA_QP_PARAMS_TCLASS_SHIFT;
1420 
1421 	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1422 	qp_attr->ah_attr.port_num = 1;
1423 	qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
1424 			       OCRDMA_QP_PARAMS_SL_MASK) >>
1425 				OCRDMA_QP_PARAMS_SL_SHIFT;
1426 	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1427 			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1428 				OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1429 	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1430 			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1431 				OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1432 	qp_attr->retry_cnt =
1433 	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1434 		OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1435 	qp_attr->min_rnr_timer = 0;
1436 	qp_attr->pkey_index = 0;
1437 	qp_attr->port_num = 1;
1438 	qp_attr->ah_attr.src_path_bits = 0;
1439 	qp_attr->ah_attr.static_rate = 0;
1440 	qp_attr->alt_pkey_index = 0;
1441 	qp_attr->alt_port_num = 0;
1442 	qp_attr->alt_timeout = 0;
1443 	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1444 	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1445 		    OCRDMA_QP_PARAMS_STATE_SHIFT;
1446 	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1447 	qp_attr->max_dest_rd_atomic =
1448 	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1449 	qp_attr->max_rd_atomic =
1450 	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1451 	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1452 				OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1453 mbx_err:
1454 	return status;
1455 }
1456 
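/* Toggle the per-RQE bit in the SRQ index bitmap. */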
1457 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
1458 {
1459 	int i = idx / 32;
1460 	unsigned int mask = (1 << (idx % 32));
1461 
1462 	if (srq->idx_bit_fields[i] & mask)
1463 		srq->idx_bit_fields[i] &= ~mask;
1464 	else
1465 		srq->idx_bit_fields[i] |= mask;
1466 }
1467 
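/* Helpers for the circular SQ/RQ hardware queues: free-entry count,
 * emptiness checks, element address lookup and head/tail advance.
 */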
1468 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1469 {
1470 	return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
1471 }
1472 
1473 static int is_hw_sq_empty(struct ocrdma_qp *qp)
1474 {
1475 	return (qp->sq.tail == qp->sq.head);
1476 }
1477 
1478 static int is_hw_rq_empty(struct ocrdma_qp *qp)
1479 {
1480 	return (qp->rq.tail == qp->rq.head);
1481 }
1482 
1483 static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1484 {
1485 	return q->va + (q->head * q->entry_size);
1486 }
1487 
1488 static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1489 				      u32 idx)
1490 {
1491 	return q->va + (idx * q->entry_size);
1492 }
1493 
1494 static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1495 {
1496 	q->head = (q->head + 1) & q->max_wqe_idx;
1497 }
1498 
1499 static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1500 {
1501 	q->tail = (q->tail + 1) & q->max_wqe_idx;
1502 }
1503 
1504 /* discard the cqe for a given QP */
1505 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1506 {
1507 	unsigned long cq_flags;
1508 	unsigned long flags;
1509 	int discard_cnt = 0;
1510 	u32 cur_getp, stop_getp;
1511 	struct ocrdma_cqe *cqe;
1512 	u32 qpn = 0;
1513 
1514 	spin_lock_irqsave(&cq->cq_lock, cq_flags);
1515 
1516 	/* traverse through the CQEs in the hw CQ,
1517 	 * find the matching CQE for a given qp,
1518 	 * mark the matching one discarded by clearing qpn.
1519 	 * ring the doorbell in the poll_cq() as
1520 	 * we don't complete out of order cqe.
1521 	 */
1522 
1523 	cur_getp = cq->getp;
1524 	/* find up to where we reap the cq. */
1525 	stop_getp = cur_getp;
1526 	do {
1527 		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1528 			break;
1529 
1530 		cqe = cq->va + cur_getp;
1531 		/* exit when:
1532 		 * (a) the whole hw cq has been reaped, or
1533 		 * (b) the qp's sq/rq becomes empty.
1534 		 */
1535 		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1536 		/* if previously discarded cqe found, skip that too. */
1537 		/* check for matching qp */
1538 		if (qpn == 0 || qpn != qp->id)
1539 			goto skip_cqe;
1540 
1541 		/* mark cqe discarded so that it is not picked up later
1542 		 * in the poll_cq().
1543 		 */
1544 		discard_cnt += 1;
1545 		cqe->cmn.qpn = 0;
1546 		if (is_cqe_for_sq(cqe)) {
1547 			ocrdma_hwq_inc_tail(&qp->sq);
1548 		} else {
1549 			if (qp->srq) {
1550 				spin_lock_irqsave(&qp->srq->q_lock, flags);
1551 				ocrdma_hwq_inc_tail(&qp->srq->rq);
1552 				ocrdma_srq_toggle_bit(qp->srq, cur_getp);
1553 				spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1554 
1555 			} else {
1556 				ocrdma_hwq_inc_tail(&qp->rq);
1557 			}
1558 		}
1559 skip_cqe:
1560 		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1561 	} while (cur_getp != stop_getp);
1562 	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1563 }
1564 
1565 void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1566 {
1567 	int found = false;
1568 	unsigned long flags;
1569 	struct ocrdma_dev *dev = qp->dev;
1570 	/* sync with any active CQ poll */
1571 
1572 	spin_lock_irqsave(&dev->flush_q_lock, flags);
1573 	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1574 	if (found)
1575 		list_del(&qp->sq_entry);
1576 	if (!qp->srq) {
1577 		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1578 		if (found)
1579 			list_del(&qp->rq_entry);
1580 	}
1581 	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1582 }
1583 
1584 int ocrdma_destroy_qp(struct ib_qp *ibqp)
1585 {
1586 	int status;
1587 	struct ocrdma_pd *pd;
1588 	struct ocrdma_qp *qp;
1589 	struct ocrdma_dev *dev;
1590 	struct ib_qp_attr attrs;
1591 	int attr_mask = IB_QP_STATE;
1592 	unsigned long flags;
1593 
1594 	qp = get_ocrdma_qp(ibqp);
1595 	dev = qp->dev;
1596 
1597 	attrs.qp_state = IB_QPS_ERR;
1598 	pd = qp->pd;
1599 
1600 	/* change the QP state to ERROR */
1601 	_ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1602 
1603 	/* ensure that CQEs for a newly created QP (whose id may be the same
1604 	 * as that of the QP just being destroyed) don't get
1605 	 * discarded until the old QP's CQEs are discarded.
1606 	 */
1607 	mutex_lock(&dev->dev_lock);
1608 	status = ocrdma_mbx_destroy_qp(dev, qp);
1609 
1610 	/*
1611 	 * acquire CQ lock while destroy is in progress, in order to
1612 	 * protect against processing in-flight CQEs for this QP.
1613 	 */
1614 	spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
1615 	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1616 		spin_lock(&qp->rq_cq->cq_lock);
1617 
1618 	ocrdma_del_qpn_map(dev, qp);
1619 
1620 	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1621 		spin_unlock(&qp->rq_cq->cq_lock);
1622 	spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1623 
1624 	if (!pd->uctx) {
1625 		ocrdma_discard_cqes(qp, qp->sq_cq);
1626 		ocrdma_discard_cqes(qp, qp->rq_cq);
1627 	}
1628 	mutex_unlock(&dev->dev_lock);
1629 
1630 	if (pd->uctx) {
1631 		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1632 				PAGE_ALIGN(qp->sq.len));
1633 		if (!qp->srq)
1634 			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1635 					PAGE_ALIGN(qp->rq.len));
1636 	}
1637 
1638 	ocrdma_del_flush_qp(qp);
1639 
1640 	kfree(qp->wqe_wr_id_tbl);
1641 	kfree(qp->rqe_wr_id_tbl);
1642 	kfree(qp);
1643 	return status;
1644 }
1645 
1646 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1647 				struct ib_udata *udata)
1648 {
1649 	int status;
1650 	struct ocrdma_create_srq_uresp uresp;
1651 
1652 	memset(&uresp, 0, sizeof(uresp));
1653 	uresp.rq_dbid = srq->rq.dbid;
1654 	uresp.num_rq_pages = 1;
1655 	uresp.rq_page_addr[0] = srq->rq.pa;
1656 	uresp.rq_page_size = srq->rq.len;
1657 	uresp.db_page_addr = dev->nic_info.unmapped_db +
1658 	    (srq->pd->id * dev->nic_info.db_page_size);
1659 	uresp.db_page_size = dev->nic_info.db_page_size;
1660 	uresp.num_rqe_allocated = srq->rq.max_cnt;
1661 	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
1662 		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1663 		uresp.db_shift = 24;
1664 	} else {
1665 		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1666 		uresp.db_shift = 16;
1667 	}
1668 
1669 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1670 	if (status)
1671 		return status;
1672 	status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1673 				 uresp.rq_page_size);
1674 	if (status)
1675 		return status;
1676 	return status;
1677 }
1678 
1679 struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1680 				 struct ib_srq_init_attr *init_attr,
1681 				 struct ib_udata *udata)
1682 {
1683 	int status = -ENOMEM;
1684 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1685 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1686 	struct ocrdma_srq *srq;
1687 
1688 	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1689 		return ERR_PTR(-EINVAL);
1690 	if (init_attr->attr.max_wr > dev->attr.max_rqe)
1691 		return ERR_PTR(-EINVAL);
1692 
1693 	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1694 	if (!srq)
1695 		return ERR_PTR(status);
1696 
1697 	spin_lock_init(&srq->q_lock);
1698 	srq->pd = pd;
1699 	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
1700 	status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1701 	if (status)
1702 		goto err;
1703 
1704 	if (udata == NULL) {
1705 		srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
1706 			    GFP_KERNEL);
1707 		if (srq->rqe_wr_id_tbl == NULL)
1708 			goto arm_err;
1709 
1710 		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1711 		    (srq->rq.max_cnt % 32 ? 1 : 0);
1712 		srq->idx_bit_fields =
1713 		    kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
1714 		if (srq->idx_bit_fields == NULL)
1715 			goto arm_err;
1716 		memset(srq->idx_bit_fields, 0xff,
1717 		       srq->bit_fields_len * sizeof(u32));
1718 	}
1719 
1720 	if (init_attr->attr.srq_limit) {
1721 		status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1722 		if (status)
1723 			goto arm_err;
1724 	}
1725 
1726 	if (udata) {
1727 		status = ocrdma_copy_srq_uresp(dev, srq, udata);
1728 		if (status)
1729 			goto arm_err;
1730 	}
1731 
1732 	return &srq->ibsrq;
1733 
1734 arm_err:
1735 	ocrdma_mbx_destroy_srq(dev, srq);
1736 err:
1737 	kfree(srq->rqe_wr_id_tbl);
1738 	kfree(srq->idx_bit_fields);
1739 	kfree(srq);
1740 	return ERR_PTR(status);
1741 }
1742 
1743 int ocrdma_modify_srq(struct ib_srq *ibsrq,
1744 		      struct ib_srq_attr *srq_attr,
1745 		      enum ib_srq_attr_mask srq_attr_mask,
1746 		      struct ib_udata *udata)
1747 {
1748 	int status = 0;
1749 	struct ocrdma_srq *srq;
1750 
1751 	srq = get_ocrdma_srq(ibsrq);
1752 	if (srq_attr_mask & IB_SRQ_MAX_WR)
1753 		status = -EINVAL;
1754 	else
1755 		status = ocrdma_mbx_modify_srq(srq, srq_attr);
1756 	return status;
1757 }
1758 
1759 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1760 {
1761 	int status;
1762 	struct ocrdma_srq *srq;
1763 
1764 	srq = get_ocrdma_srq(ibsrq);
1765 	status = ocrdma_mbx_query_srq(srq, srq_attr);
1766 	return status;
1767 }
1768 
1769 int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1770 {
1771 	int status;
1772 	struct ocrdma_srq *srq;
1773 	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1774 
1775 	srq = get_ocrdma_srq(ibsrq);
1776 
1777 	status = ocrdma_mbx_destroy_srq(dev, srq);
1778 
1779 	if (srq->pd->uctx)
1780 		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1781 				PAGE_ALIGN(srq->rq.len));
1782 
1783 	kfree(srq->idx_bit_fields);
1784 	kfree(srq->rqe_wr_id_tbl);
1785 	kfree(srq);
1786 	return status;
1787 }
1788 
1789 /* unprivileged verbs and their support functions. */
1790 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1791 				struct ocrdma_hdr_wqe *hdr,
1792 				struct ib_send_wr *wr)
1793 {
1794 	struct ocrdma_ewqe_ud_hdr *ud_hdr =
1795 		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1796 	struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
1797 
1798 	ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
1799 	if (qp->qp_type == IB_QPT_GSI)
1800 		ud_hdr->qkey = qp->qkey;
1801 	else
1802 		ud_hdr->qkey = wr->wr.ud.remote_qkey;
1803 	ud_hdr->rsvd_ahid = ah->id;
1804 }
1805 
1806 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1807 			      struct ocrdma_sge *sge, int num_sge,
1808 			      struct ib_sge *sg_list)
1809 {
1810 	int i;
1811 
1812 	for (i = 0; i < num_sge; i++) {
1813 		sge[i].lrkey = sg_list[i].lkey;
1814 		sge[i].addr_lo = sg_list[i].addr;
1815 		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1816 		sge[i].len = sg_list[i].length;
1817 		hdr->total_len += sg_list[i].length;
1818 	}
1819 	if (num_sge == 0)
1820 		memset(sge, 0, sizeof(*sge));
1821 }
1822 
1823 static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
1824 {
1825 	uint32_t total_len = 0, i;
1826 
1827 	for (i = 0; i < num_sge; i++)
1828 		total_len += sg_list[i].length;
1829 	return total_len;
1830 }
1831 
1832 
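/* Build the data portion of a WQE: either copy the payload inline
 * (IB_SEND_INLINE on non-UD QPs) or emit a gather list of SGEs, and
 * encode the resulting WQE size in the header control word.
 */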
1833 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1834 				    struct ocrdma_hdr_wqe *hdr,
1835 				    struct ocrdma_sge *sge,
1836 				    struct ib_send_wr *wr, u32 wqe_size)
1837 {
1838 	int i;
1839 	char *dpp_addr;
1840 
1841 	if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
1842 		hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
1843 		if (unlikely(hdr->total_len > qp->max_inline_data)) {
1844 			pr_err("%s() supported_len=0x%x,\n"
1845 			       " unsupported len req=0x%x\n", __func__,
1846 				qp->max_inline_data, hdr->total_len);
1847 			return -EINVAL;
1848 		}
1849 		dpp_addr = (char *)sge;
1850 		for (i = 0; i < wr->num_sge; i++) {
1851 			memcpy(dpp_addr,
1852 			       (void *)(unsigned long)wr->sg_list[i].addr,
1853 			       wr->sg_list[i].length);
1854 			dpp_addr += wr->sg_list[i].length;
1855 		}
1856 
1857 		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
		if (hdr->total_len == 0)
1859 			wqe_size += sizeof(struct ocrdma_sge);
1860 		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
1861 	} else {
1862 		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1863 		if (wr->num_sge)
1864 			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
1865 		else
1866 			wqe_size += sizeof(struct ocrdma_sge);
1867 		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1868 	}
1869 	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1870 	return 0;
1871 }
1872 
1873 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1874 			     struct ib_send_wr *wr)
1875 {
1876 	int status;
1877 	struct ocrdma_sge *sge;
1878 	u32 wqe_size = sizeof(*hdr);
1879 
1880 	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
1881 		ocrdma_build_ud_hdr(qp, hdr, wr);
1882 		sge = (struct ocrdma_sge *)(hdr + 2);
1883 		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
1884 	} else {
1885 		sge = (struct ocrdma_sge *)(hdr + 1);
1886 	}
1887 
1888 	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1889 	return status;
1890 }
1891 
1892 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1893 			      struct ib_send_wr *wr)
1894 {
1895 	int status;
1896 	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1897 	struct ocrdma_sge *sge = ext_rw + 1;
1898 	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
1899 
1900 	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1901 	if (status)
1902 		return status;
1903 	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
1904 	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
1905 	ext_rw->lrkey = wr->wr.rdma.rkey;
1906 	ext_rw->len = hdr->total_len;
1907 	return 0;
1908 }
1909 
1910 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1911 			      struct ib_send_wr *wr)
1912 {
1913 	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1914 	struct ocrdma_sge *sge = ext_rw + 1;
1915 	u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
1916 	    sizeof(struct ocrdma_hdr_wqe);
1917 
1918 	ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1919 	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1920 	hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
1921 	hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1922 
1923 	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
1924 	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
1925 	ext_rw->lrkey = wr->wr.rdma.rkey;
1926 	ext_rw->len = hdr->total_len;
1927 }
1928 
1929 static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
1930 			    struct ocrdma_hw_mr *hwmr)
1931 {
1932 	int i;
1933 	u64 buf_addr = 0;
1934 	int num_pbes;
1935 	struct ocrdma_pbe *pbe;
1936 
1937 	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
1938 	num_pbes = 0;
1939 
	/* walk the WR's page list and fill HW PBE entries into the PBLs. */
1941 	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		/* each fast-register page-list entry maps to exactly one
		 * page-sized HW PBE, so the addresses are copied one-to-one.
		 */
1946 		buf_addr = wr->wr.fast_reg.page_list->page_list[i];
1947 		pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
1948 		pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
1949 		num_pbes += 1;
1950 		pbe++;
1951 
		/* if the current PBL is full, move on to the next PBL. */
		if (num_pbes == (hwmr->pbl_size / sizeof(u64))) {
			pbl_tbl++;
			pbe = (struct ocrdma_pbe *)pbl_tbl->va;
			num_pbes = 0;
		}
1959 	}
1961 }
1962 
1963 static int get_encoded_page_size(int pg_sz)
1964 {
	/* Max page size is 256M (4096 << 16). */
	int i;

	for (i = 0; i < 17; i++)
1968 		if (pg_sz == (4096 << i))
1969 			break;
1970 	return i;
1971 }
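
/*
 * Illustrative mapping produced by get_encoded_page_size() (derived from the
 * loop above, not from a hardware spec):
 *
 *	4096 (4K)          -> 0
 *	8192 (8K)          -> 1
 *	16384 (16K)        -> 2
 *	...
 *	4096 << 16 (256M)  -> 16
 *
 * Any pg_sz that is not an exact 4096 << i in this range falls through the
 * loop and yields 17.
 */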
1972 
1974 static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1975 			   struct ib_send_wr *wr)
1976 {
1977 	u64 fbo;
1978 	struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
1979 	struct ocrdma_mr *mr;
1980 	u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
1981 
1982 	wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
1983 
1984 	if ((wr->wr.fast_reg.page_list_len >
1985 		qp->dev->attr.max_pages_per_frmr) ||
1986 		(wr->wr.fast_reg.length > 0xffffffffULL))
1987 		return -EINVAL;
1988 
1989 	hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
1990 	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1991 
	BUG_ON(wr->wr.fast_reg.page_list_len == 0);
1994 	if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
1995 		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
1996 	if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
1997 		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
1998 	if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
1999 		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2000 	hdr->lkey = wr->wr.fast_reg.rkey;
2001 	hdr->total_len = wr->wr.fast_reg.length;
2002 
2003 	fbo = wr->wr.fast_reg.iova_start -
2004 	    (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
2005 
2006 	fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
2007 	fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
2008 	fast_reg->fbo_hi = upper_32_bits(fbo);
2009 	fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2010 	fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
2011 	fast_reg->size_sge =
2012 		get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
2013 	mr = (struct ocrdma_mr *) (unsigned long) qp->dev->stag_arr[(hdr->lkey >> 8) &
2014 		(OCRDMA_MAX_STAG - 1)];
2015 	build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
2016 	return 0;
2017 }
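
/*
 * Minimal sketch (illustrative only) of the fast-register work request that
 * ocrdma_build_fr() consumes.  The mr, page_list, iova, npages and len
 * variables are hypothetical and assumed to come from ib_alloc_fast_reg_mr()
 * and ib_alloc_fast_reg_page_list() against this device:
 *
 *	struct ib_send_wr fr_wr;
 *
 *	memset(&fr_wr, 0, sizeof(fr_wr));
 *	fr_wr.opcode = IB_WR_FAST_REG_MR;
 *	fr_wr.wr.fast_reg.iova_start = iova;
 *	fr_wr.wr.fast_reg.page_list = page_list;
 *	fr_wr.wr.fast_reg.page_list_len = npages;
 *	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 *	fr_wr.wr.fast_reg.length = len;
 *	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
 *					 IB_ACCESS_REMOTE_WRITE;
 *	fr_wr.wr.fast_reg.rkey = mr->rkey;
 */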
2018 
2019 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2020 {
2021 	u32 val = qp->sq.dbid | (1 << 16);
2022 
2023 	iowrite32(val, qp->sq_db);
2024 }
2025 
2026 int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2027 		     struct ib_send_wr **bad_wr)
2028 {
2029 	int status = 0;
2030 	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2031 	struct ocrdma_hdr_wqe *hdr;
2032 	unsigned long flags;
2033 
2034 	spin_lock_irqsave(&qp->q_lock, flags);
2035 	if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2036 		spin_unlock_irqrestore(&qp->q_lock, flags);
2037 		*bad_wr = wr;
2038 		return -EINVAL;
2039 	}
2040 
2041 	while (wr) {
2042 		if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2043 		    wr->num_sge > qp->sq.max_sges) {
2044 			*bad_wr = wr;
2045 			status = -ENOMEM;
2046 			break;
2047 		}
2048 		hdr = ocrdma_hwq_head(&qp->sq);
2049 		hdr->cw = 0;
2050 		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2051 			hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2052 		if (wr->send_flags & IB_SEND_FENCE)
2053 			hdr->cw |=
2054 			    (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2055 		if (wr->send_flags & IB_SEND_SOLICITED)
2056 			hdr->cw |=
2057 			    (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2058 		hdr->total_len = 0;
2059 		switch (wr->opcode) {
2060 		case IB_WR_SEND_WITH_IMM:
2061 			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2062 			hdr->immdt = ntohl(wr->ex.imm_data);
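			/* fall through */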
2063 		case IB_WR_SEND:
2064 			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_send(qp, hdr, wr);
2066 			break;
2067 		case IB_WR_SEND_WITH_INV:
2068 			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2069 			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2070 			hdr->lkey = wr->ex.invalidate_rkey;
2071 			status = ocrdma_build_send(qp, hdr, wr);
2072 			break;
2073 		case IB_WR_RDMA_WRITE_WITH_IMM:
2074 			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2075 			hdr->immdt = ntohl(wr->ex.imm_data);
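			/* fall through */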
2076 		case IB_WR_RDMA_WRITE:
2077 			hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2078 			status = ocrdma_build_write(qp, hdr, wr);
2079 			break;
2080 		case IB_WR_RDMA_READ_WITH_INV:
2081 			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
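			/* fall through */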
2082 		case IB_WR_RDMA_READ:
2083 			ocrdma_build_read(qp, hdr, wr);
2084 			break;
2085 		case IB_WR_LOCAL_INV:
2086 			hdr->cw |=
2087 			    (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
2088 			hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2089 					sizeof(struct ocrdma_sge)) /
2090 				OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2091 			hdr->lkey = wr->ex.invalidate_rkey;
2092 			break;
2093 		case IB_WR_FAST_REG_MR:
2094 			status = ocrdma_build_fr(qp, hdr, wr);
2095 			break;
2096 		default:
2097 			status = -EINVAL;
2098 			break;
2099 		}
2100 		if (status) {
2101 			*bad_wr = wr;
2102 			break;
2103 		}
2104 		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2105 			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2106 		else
2107 			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2108 		qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2109 		ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2110 				   OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2111 		/* make sure wqe is written before adapter can access it */
2112 		wmb();
2113 		/* inform hw to start processing it */
2114 		ocrdma_ring_sq_db(qp);
2115 
2116 		/* update pointer, counter for next wr */
2117 		ocrdma_hwq_inc_head(&qp->sq);
2118 		wr = wr->next;
2119 	}
2120 	spin_unlock_irqrestore(&qp->q_lock, flags);
2121 	return status;
2122 }
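
/*
 * Illustrative caller-side sketch (hypothetical qp, mr, dma_addr, len and
 * cookie) of a signaled SEND that reaches ocrdma_post_send() through the
 * ib_post_send() verb:
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr, *bad_wr;
 *	int rc;
 *
 *	memset(&wr, 0, sizeof(wr));
 *	wr.wr_id = cookie;
 *	wr.opcode = IB_WR_SEND;
 *	wr.send_flags = IB_SEND_SIGNALED;
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *	rc = ib_post_send(qp, &wr, &bad_wr);
 *	if (rc)
 *		pr_err("post_send failed (%d)\n", rc);
 */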
2123 
2124 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2125 {
2126 	u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp));
2127 
2128 	if (qp->state != OCRDMA_QPS_INIT)
2129 		iowrite32(val, qp->rq_db);
2130 	else
2131 		qp->db_cache++;
2132 }
2133 
2134 static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
2135 			     u16 tag)
2136 {
2137 	u32 wqe_size = 0;
2138 	struct ocrdma_sge *sge;
2139 	if (wr->num_sge)
2140 		wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2141 	else
2142 		wqe_size = sizeof(*sge) + sizeof(*rqe);
2143 
2144 	rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2145 				OCRDMA_WQE_SIZE_SHIFT);
2146 	rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2147 	rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2148 	rqe->total_len = 0;
2149 	rqe->rsvd_tag = tag;
2150 	sge = (struct ocrdma_sge *)(rqe + 1);
2151 	ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2152 	ocrdma_cpu_to_le32(rqe, wqe_size);
2153 }
2154 
2155 int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2156 		     struct ib_recv_wr **bad_wr)
2157 {
2158 	int status = 0;
2159 	unsigned long flags;
2160 	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2161 	struct ocrdma_hdr_wqe *rqe;
2162 
2163 	spin_lock_irqsave(&qp->q_lock, flags);
2164 	if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2165 		spin_unlock_irqrestore(&qp->q_lock, flags);
2166 		*bad_wr = wr;
2167 		return -EINVAL;
2168 	}
2169 	while (wr) {
2170 		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2171 		    wr->num_sge > qp->rq.max_sges) {
2172 			*bad_wr = wr;
2173 			status = -ENOMEM;
2174 			break;
2175 		}
2176 		rqe = ocrdma_hwq_head(&qp->rq);
2177 		ocrdma_build_rqe(rqe, wr, 0);
2178 
2179 		qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2180 		/* make sure rqe is written before adapter can access it */
2181 		wmb();
2182 
2183 		/* inform hw to start processing it */
2184 		ocrdma_ring_rq_db(qp);
2185 
2186 		/* update pointer, counter for next wr */
2187 		ocrdma_hwq_inc_head(&qp->rq);
2188 		wr = wr->next;
2189 	}
2190 	spin_unlock_irqrestore(&qp->q_lock, flags);
2191 	return status;
2192 }
2193 
/* CQEs for an SRQ's RQEs can arrive out of order.  The index returned here
 * selects the shadow-table slot in which the wr_id is stored; the same
 * tag/index comes back in the CQE so the RQE can be referenced later.
 */
2199 static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2200 {
2201 	int row = 0;
2202 	int indx = 0;
2203 
2204 	for (row = 0; row < srq->bit_fields_len; row++) {
2205 		if (srq->idx_bit_fields[row]) {
2206 			indx = ffs(srq->idx_bit_fields[row]);
2207 			indx = (row * 32) + (indx - 1);
			BUG_ON(indx >= srq->rq.max_cnt);
2210 			ocrdma_srq_toggle_bit(srq, indx);
2211 			break;
2212 		}
2213 	}
2214 
	BUG_ON(row == srq->bit_fields_len);
2217 	return indx;
2218 }
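
/*
 * Worked example (illustrative): with rq.max_cnt = 40, idx_bit_fields spans
 * two 32-bit words.  If word 0 is zero (all 32 of its tags in use) and word 1
 * has bit 3 set, ffs() returns 4, so indx = (1 * 32) + (4 - 1) = 35; the bit
 * is then toggled to mark tag 35 in use until its CQE hands it back.
 */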
2219 
2220 static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2221 {
2222 	u32 val = srq->rq.dbid | (1 << 16);
2223 
2224 	iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2225 }
2226 
2227 int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
2228 			 struct ib_recv_wr **bad_wr)
2229 {
2230 	int status = 0;
2231 	unsigned long flags;
2232 	struct ocrdma_srq *srq;
2233 	struct ocrdma_hdr_wqe *rqe;
2234 	u16 tag;
2235 
2236 	srq = get_ocrdma_srq(ibsrq);
2237 
2238 	spin_lock_irqsave(&srq->q_lock, flags);
2239 	while (wr) {
2240 		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2241 		    wr->num_sge > srq->rq.max_sges) {
2242 			status = -ENOMEM;
2243 			*bad_wr = wr;
2244 			break;
2245 		}
2246 		tag = ocrdma_srq_get_idx(srq);
2247 		rqe = ocrdma_hwq_head(&srq->rq);
2248 		ocrdma_build_rqe(rqe, wr, tag);
2249 
2250 		srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2251 		/* make sure rqe is written before adapter can perform DMA */
2252 		wmb();
2253 		/* inform hw to start processing it */
2254 		ocrdma_ring_srq_db(srq);
2255 		/* update pointer, counter for next wr */
2256 		ocrdma_hwq_inc_head(&srq->rq);
2257 		wr = wr->next;
2258 	}
2259 	spin_unlock_irqrestore(&srq->q_lock, flags);
2260 	return status;
2261 }
2262 
2263 static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2264 {
2265 	enum ib_wc_status ibwc_status;
2266 
2267 	switch (status) {
2268 	case OCRDMA_CQE_GENERAL_ERR:
2269 		ibwc_status = IB_WC_GENERAL_ERR;
2270 		break;
2271 	case OCRDMA_CQE_LOC_LEN_ERR:
2272 		ibwc_status = IB_WC_LOC_LEN_ERR;
2273 		break;
2274 	case OCRDMA_CQE_LOC_QP_OP_ERR:
2275 		ibwc_status = IB_WC_LOC_QP_OP_ERR;
2276 		break;
2277 	case OCRDMA_CQE_LOC_EEC_OP_ERR:
2278 		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2279 		break;
2280 	case OCRDMA_CQE_LOC_PROT_ERR:
2281 		ibwc_status = IB_WC_LOC_PROT_ERR;
2282 		break;
2283 	case OCRDMA_CQE_WR_FLUSH_ERR:
2284 		ibwc_status = IB_WC_WR_FLUSH_ERR;
2285 		break;
2286 	case OCRDMA_CQE_MW_BIND_ERR:
2287 		ibwc_status = IB_WC_MW_BIND_ERR;
2288 		break;
2289 	case OCRDMA_CQE_BAD_RESP_ERR:
2290 		ibwc_status = IB_WC_BAD_RESP_ERR;
2291 		break;
2292 	case OCRDMA_CQE_LOC_ACCESS_ERR:
2293 		ibwc_status = IB_WC_LOC_ACCESS_ERR;
2294 		break;
2295 	case OCRDMA_CQE_REM_INV_REQ_ERR:
2296 		ibwc_status = IB_WC_REM_INV_REQ_ERR;
2297 		break;
2298 	case OCRDMA_CQE_REM_ACCESS_ERR:
2299 		ibwc_status = IB_WC_REM_ACCESS_ERR;
2300 		break;
2301 	case OCRDMA_CQE_REM_OP_ERR:
2302 		ibwc_status = IB_WC_REM_OP_ERR;
2303 		break;
2304 	case OCRDMA_CQE_RETRY_EXC_ERR:
2305 		ibwc_status = IB_WC_RETRY_EXC_ERR;
2306 		break;
2307 	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2308 		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2309 		break;
2310 	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2311 		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2312 		break;
2313 	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2314 		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2315 		break;
2316 	case OCRDMA_CQE_REM_ABORT_ERR:
2317 		ibwc_status = IB_WC_REM_ABORT_ERR;
2318 		break;
2319 	case OCRDMA_CQE_INV_EECN_ERR:
2320 		ibwc_status = IB_WC_INV_EECN_ERR;
2321 		break;
2322 	case OCRDMA_CQE_INV_EEC_STATE_ERR:
2323 		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2324 		break;
2325 	case OCRDMA_CQE_FATAL_ERR:
2326 		ibwc_status = IB_WC_FATAL_ERR;
2327 		break;
2328 	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2329 		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2330 		break;
2331 	default:
2332 		ibwc_status = IB_WC_GENERAL_ERR;
2333 		break;
	}
2335 	return ibwc_status;
2336 }
2337 
2338 static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2339 		      u32 wqe_idx)
2340 {
2341 	struct ocrdma_hdr_wqe *hdr;
2342 	struct ocrdma_sge *rw;
2343 	int opcode;
2344 
2345 	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2346 
2347 	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2348 	/* Undo the hdr->cw swap */
2349 	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2350 	switch (opcode) {
2351 	case OCRDMA_WRITE:
2352 		ibwc->opcode = IB_WC_RDMA_WRITE;
2353 		break;
2354 	case OCRDMA_READ:
2355 		rw = (struct ocrdma_sge *)(hdr + 1);
2356 		ibwc->opcode = IB_WC_RDMA_READ;
2357 		ibwc->byte_len = rw->len;
2358 		break;
2359 	case OCRDMA_SEND:
2360 		ibwc->opcode = IB_WC_SEND;
2361 		break;
2362 	case OCRDMA_FR_MR:
2363 		ibwc->opcode = IB_WC_FAST_REG_MR;
2364 		break;
2365 	case OCRDMA_LKEY_INV:
2366 		ibwc->opcode = IB_WC_LOCAL_INV;
2367 		break;
2368 	default:
2369 		ibwc->status = IB_WC_GENERAL_ERR;
2370 		pr_err("%s() invalid opcode received = 0x%x\n",
2371 		       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
2372 		break;
	}
2374 }
2375 
2376 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2377 						struct ocrdma_cqe *cqe)
2378 {
2379 	if (is_cqe_for_sq(cqe)) {
2380 		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2381 				cqe->flags_status_srcqpn) &
2382 					~OCRDMA_CQE_STATUS_MASK);
2383 		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2384 				cqe->flags_status_srcqpn) |
2385 				(OCRDMA_CQE_WR_FLUSH_ERR <<
2386 					OCRDMA_CQE_STATUS_SHIFT));
2387 	} else {
2388 		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2389 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2390 					cqe->flags_status_srcqpn) &
2391 						~OCRDMA_CQE_UD_STATUS_MASK);
2392 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2393 					cqe->flags_status_srcqpn) |
2394 					(OCRDMA_CQE_WR_FLUSH_ERR <<
2395 						OCRDMA_CQE_UD_STATUS_SHIFT));
2396 		} else {
2397 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2398 					cqe->flags_status_srcqpn) &
2399 						~OCRDMA_CQE_STATUS_MASK);
2400 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2401 					cqe->flags_status_srcqpn) |
2402 					(OCRDMA_CQE_WR_FLUSH_ERR <<
2403 						OCRDMA_CQE_STATUS_SHIFT));
2404 		}
2405 	}
2406 }
2407 
2408 static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2409 				  struct ocrdma_qp *qp, int status)
2410 {
2411 	bool expand = false;
2412 
2413 	ibwc->byte_len = 0;
2414 	ibwc->qp = &qp->ibqp;
2415 	ibwc->status = ocrdma_to_ibwc_err(status);
2416 
2417 	ocrdma_flush_qp(qp);
2418 	ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
2419 
	/* if WQEs/RQEs are still pending for which CQEs must be returned,
	 * mark this CQE for expansion into flush CQEs.
	 */
2423 	if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2424 		expand = true;
2425 		ocrdma_set_cqe_status_flushed(qp, cqe);
2426 	}
2427 	return expand;
2428 }
2429 
static bool ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2431 				  struct ocrdma_qp *qp, int status)
2432 {
2433 	ibwc->opcode = IB_WC_RECV;
2434 	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2435 	ocrdma_hwq_inc_tail(&qp->rq);
2436 
2437 	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2438 }
2439 
static bool ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2441 				  struct ocrdma_qp *qp, int status)
2442 {
2443 	ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2444 	ocrdma_hwq_inc_tail(&qp->sq);
2445 
2446 	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2447 }
2448 
2450 static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2451 				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2452 				 bool *polled, bool *stop)
2453 {
2454 	bool expand;
2455 	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2456 		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2457 
	/* when the HW SQ is empty but the RQ is not, keep the CQE so that
	 * the CQ event is raised again for the pending RQEs.
	 */
2461 	if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
		/* when the SQ and RQ share the same CQ, it is safe to
		 * return flush CQEs for the RQEs.
		 */
2465 		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2466 			*polled = true;
2467 			status = OCRDMA_CQE_WR_FLUSH_ERR;
2468 			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2469 		} else {
			/* stop processing further CQEs; this CQE is needed
			 * to trigger the CQ event on the RQ's buddy CQ.
			 * It is removed from the CQ's hardware queue when
			 * the QP is destroyed.
			 */
2475 			*polled = false;
2476 			*stop = true;
2477 			expand = false;
2478 		}
2479 	} else {
2480 		*polled = true;
2481 		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2482 	}
2483 	return expand;
2484 }
2485 
2486 static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2487 				     struct ocrdma_cqe *cqe,
2488 				     struct ib_wc *ibwc, bool *polled)
2489 {
2490 	bool expand = false;
2491 	int tail = qp->sq.tail;
2492 	u32 wqe_idx;
2493 
2494 	if (!qp->wqe_wr_id_tbl[tail].signaled) {
2495 		*polled = false;    /* WC cannot be consumed yet */
2496 	} else {
2497 		ibwc->status = IB_WC_SUCCESS;
2498 		ibwc->wc_flags = 0;
2499 		ibwc->qp = &qp->ibqp;
2500 		ocrdma_update_wc(qp, ibwc, tail);
2501 		*polled = true;
2502 	}
2503 	wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2504 			OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
2505 	if (tail != wqe_idx)
2506 		expand = true; /* Coalesced CQE can't be consumed yet */
2507 
2508 	ocrdma_hwq_inc_tail(&qp->sq);
2509 	return expand;
2510 }
2511 
2512 static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2513 			     struct ib_wc *ibwc, bool *polled, bool *stop)
2514 {
2515 	int status;
2516 	bool expand;
2517 
2518 	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2519 		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2520 
2521 	if (status == OCRDMA_CQE_SUCCESS)
2522 		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2523 	else
2524 		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2525 	return expand;
2526 }
2527 
2528 static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2529 {
2530 	int status;
2531 
2532 	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2533 		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2534 	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2535 						OCRDMA_CQE_SRCQP_MASK;
2536 	ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
2537 						OCRDMA_CQE_PKEY_MASK;
2538 	ibwc->wc_flags = IB_WC_GRH;
2539 	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2540 					OCRDMA_CQE_UD_XFER_LEN_SHIFT);
2541 	return status;
2542 }
2543 
2544 static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2545 				       struct ocrdma_cqe *cqe,
2546 				       struct ocrdma_qp *qp)
2547 {
2548 	unsigned long flags;
2549 	struct ocrdma_srq *srq;
2550 	u32 wqe_idx;
2551 
2552 	srq = get_ocrdma_srq(qp->ibqp.srq);
2553 	wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
2554 			OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
2555 	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2556 	spin_lock_irqsave(&srq->q_lock, flags);
2557 	ocrdma_srq_toggle_bit(srq, wqe_idx);
2558 	spin_unlock_irqrestore(&srq->q_lock, flags);
2559 	ocrdma_hwq_inc_tail(&srq->rq);
2560 }
2561 
2562 static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2563 				struct ib_wc *ibwc, bool *polled, bool *stop,
2564 				int status)
2565 {
2566 	bool expand;
2567 
	/* when the HW RQ is empty but the SQ is not, keep the CQE so that
	 * the CQ event is raised again for the pending WQEs.
	 */
2571 	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2572 		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2573 			*polled = true;
2574 			status = OCRDMA_CQE_WR_FLUSH_ERR;
2575 			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2576 		} else {
2577 			*polled = false;
2578 			*stop = true;
2579 			expand = false;
2580 		}
2581 	} else {
2582 		*polled = true;
2583 		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2584 	}
2585 	return expand;
2586 }
2587 
2588 static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2589 				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2590 {
2591 	ibwc->opcode = IB_WC_RECV;
2592 	ibwc->qp = &qp->ibqp;
2593 	ibwc->status = IB_WC_SUCCESS;
2594 
2595 	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2596 		ocrdma_update_ud_rcqe(ibwc, cqe);
2597 	else
2598 		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2599 
2600 	if (is_cqe_imm(cqe)) {
2601 		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2602 		ibwc->wc_flags |= IB_WC_WITH_IMM;
2603 	} else if (is_cqe_wr_imm(cqe)) {
2604 		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2605 		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2606 		ibwc->wc_flags |= IB_WC_WITH_IMM;
2607 	} else if (is_cqe_invalidated(cqe)) {
2608 		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2609 		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2610 	}
2611 	if (qp->ibqp.srq) {
2612 		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
2613 	} else {
2614 		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2615 		ocrdma_hwq_inc_tail(&qp->rq);
2616 	}
2617 }
2618 
2619 static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2620 			     struct ib_wc *ibwc, bool *polled, bool *stop)
2621 {
2622 	int status;
2623 	bool expand = false;
2624 
2625 	ibwc->wc_flags = 0;
2626 	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2627 		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2628 					OCRDMA_CQE_UD_STATUS_MASK) >>
2629 					OCRDMA_CQE_UD_STATUS_SHIFT;
2630 	} else {
2631 		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2632 			     OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2633 	}
2634 
2635 	if (status == OCRDMA_CQE_SUCCESS) {
2636 		*polled = true;
2637 		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2638 	} else {
2639 		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2640 					      status);
2641 	}
2642 	return expand;
2643 }
2644 
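/* Advance the CQE valid-detection state after a CQE has been consumed: with
 * phase_change enabled, the expected sense of the valid bit flips each time
 * the get pointer wraps back to entry 0; otherwise the consumed CQE's valid
 * bit is simply cleared in place.
 */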
2645 static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2646 				   u16 cur_getp)
2647 {
2648 	if (cq->phase_change) {
2649 		if (cur_getp == 0)
2650 			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
2651 	} else {
2652 		/* clear valid bit */
2653 		cqe->flags_status_srcqpn = 0;
2654 	}
2655 }
2656 
2657 static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
2658 			    struct ib_wc *ibwc)
2659 {
2660 	u16 qpn = 0;
2661 	int i = 0;
2662 	bool expand = false;
2663 	int polled_hw_cqes = 0;
2664 	struct ocrdma_qp *qp = NULL;
2665 	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
2666 	struct ocrdma_cqe *cqe;
	u16 cur_getp;
	bool polled = false;
	bool stop = false;
2668 
2669 	cur_getp = cq->getp;
2670 	while (num_entries) {
2671 		cqe = cq->va + cur_getp;
		/* check whether this is a valid CQE */
2673 		if (!is_cqe_valid(cq, cqe))
2674 			break;
2675 		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
2676 		/* ignore discarded cqe */
2677 		if (qpn == 0)
2678 			goto skip_cqe;
2679 		qp = dev->qp_tbl[qpn];
2680 		BUG_ON(qp == NULL);
2681 
2682 		if (is_cqe_for_sq(cqe)) {
2683 			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
2684 						  &stop);
2685 		} else {
2686 			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
2687 						  &stop);
2688 		}
2689 		if (expand)
2690 			goto expand_cqe;
2691 		if (stop)
2692 			goto stop_cqe;
2693 		/* clear qpn to avoid duplicate processing by discard_cqe() */
2694 		cqe->cmn.qpn = 0;
2695 skip_cqe:
2696 		polled_hw_cqes += 1;
2697 		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
2698 		ocrdma_change_cq_phase(cq, cqe, cur_getp);
2699 expand_cqe:
2700 		if (polled) {
2701 			num_entries -= 1;
2702 			i += 1;
2703 			ibwc = ibwc + 1;
2704 			polled = false;
2705 		}
2706 	}
2707 stop_cqe:
2708 	cq->getp = cur_getp;
2709 	if (polled_hw_cqes || expand || stop) {
2710 		ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,
2711 				  polled_hw_cqes);
2712 	}
2713 	return i;
2714 }
2715 
2716 /* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
2717 static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
2718 			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
2719 {
2720 	int err_cqes = 0;
2721 
2722 	while (num_entries) {
2723 		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
2724 			break;
2725 		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
2726 			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2727 			ocrdma_hwq_inc_tail(&qp->sq);
2728 		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
2729 			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2730 			ocrdma_hwq_inc_tail(&qp->rq);
2731 		} else {
2732 			return err_cqes;
2733 		}
2734 		ibwc->byte_len = 0;
2735 		ibwc->status = IB_WC_WR_FLUSH_ERR;
2736 		ibwc = ibwc + 1;
2737 		err_cqes += 1;
2738 		num_entries -= 1;
2739 	}
2740 	return err_cqes;
2741 }
2742 
2743 int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2744 {
2745 	int cqes_to_poll = num_entries;
2746 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2747 	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2748 	int num_os_cqe = 0, err_cqes = 0;
2749 	struct ocrdma_qp *qp;
2750 	unsigned long flags;
2751 
2752 	/* poll cqes from adapter CQ */
2753 	spin_lock_irqsave(&cq->cq_lock, flags);
2754 	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
2755 	spin_unlock_irqrestore(&cq->cq_lock, flags);
2756 	cqes_to_poll -= num_os_cqe;
2757 
2758 	if (cqes_to_poll) {
2759 		wc = wc + num_os_cqe;
		/* the adapter returns a single error CQE when a QP moves
		 * to the error state.  For each QP whose SQ or RQ uses
		 * this CQ, insert error CQEs with wc_status FLUSHED for
		 * its pending WQEs and RQEs.
		 */
2765 		spin_lock_irqsave(&dev->flush_q_lock, flags);
2766 		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
2767 			if (cqes_to_poll == 0)
2768 				break;
2769 			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
2770 			cqes_to_poll -= err_cqes;
2771 			num_os_cqe += err_cqes;
2772 			wc = wc + err_cqes;
2773 		}
2774 		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
2775 	}
2776 	return num_os_cqe;
2777 }
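
/*
 * Illustrative consumer-side sketch (hypothetical cq variable): draining
 * completions through the ib_poll_cq() verb, which lands in ocrdma_poll_cq():
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, 8, wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			if (wc[i].status != IB_WC_SUCCESS)
 *				pr_debug("wc[%d] status %d\n", i,
 *					 wc[i].status);
 */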
2778 
2779 int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
2780 {
2781 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2782 	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2783 	u16 cq_id;
2784 	u16 cur_getp;
2785 	struct ocrdma_cqe *cqe;
2786 	unsigned long flags;
2787 
2788 	cq_id = cq->id;
2789 
2790 	spin_lock_irqsave(&cq->cq_lock, flags);
2791 	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
2792 		cq->armed = true;
2793 	if (cq_flags & IB_CQ_SOLICITED)
2794 		cq->solicited = true;
2795 
2796 	cur_getp = cq->getp;
2797 	cqe = cq->va + cur_getp;
2798 
	/* if no valid CQE is outstanding, it is safe to arm the CQ now.
	 * Otherwise let the pending CQE be consumed first and arm then,
	 * to avoid false interrupts.
	 */
2803 	if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {
2804 		cq->arm_needed = false;
2805 		ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0);
2806 	}
2807 	spin_unlock_irqrestore(&cq->cq_lock, flags);
2808 	return 0;
2809 }
2810 
2811 struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
2812 {
2813 	int status;
2814 	struct ocrdma_mr *mr;
2815 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
2816 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
2817 
2818 	if (max_page_list_len > dev->attr.max_pages_per_frmr)
2819 		return ERR_PTR(-EINVAL);
2820 
2821 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2822 	if (!mr)
2823 		return ERR_PTR(-ENOMEM);
2824 
2825 	status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
2826 	if (status)
2827 		goto pbl_err;
2828 	mr->hwmr.fr_mr = 1;
2829 	mr->hwmr.remote_rd = 0;
2830 	mr->hwmr.remote_wr = 0;
2831 	mr->hwmr.local_rd = 0;
2832 	mr->hwmr.local_wr = 0;
2833 	mr->hwmr.mw_bind = 0;
2834 	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
2835 	if (status)
2836 		goto pbl_err;
2837 	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
2838 	if (status)
2839 		goto mbx_err;
2840 	mr->ibmr.rkey = mr->hwmr.lkey;
2841 	mr->ibmr.lkey = mr->hwmr.lkey;
	dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
		(unsigned long)mr;
2843 	return &mr->ibmr;
2844 mbx_err:
2845 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
2846 pbl_err:
2847 	kfree(mr);
	return ERR_PTR(status);
2849 }
2850 
2851 struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
2852 							  *ibdev,
2853 							  int page_list_len)
2854 {
2855 	struct ib_fast_reg_page_list *frmr_list;
2856 	int size;
2857 
2858 	size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
2859 	frmr_list = kzalloc(size, GFP_KERNEL);
2860 	if (!frmr_list)
2861 		return ERR_PTR(-ENOMEM);
2862 	frmr_list->page_list = (u64 *)(frmr_list + 1);
2863 	return frmr_list;
2864 }
2865 
2866 void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
2867 {
2868 	kfree(page_list);
2869 }
2870 
2871 #define MAX_KERNEL_PBE_SIZE 65536
2872 static inline int count_kernel_pbes(struct ib_phys_buf *buf_list,
2873 				    int buf_cnt, u32 *pbe_size)
2874 {
2875 	u64 total_size = 0;
2876 	u64 buf_size = 0;
2877 	int i;
2878 	*pbe_size = roundup(buf_list[0].size, PAGE_SIZE);
2879 	*pbe_size = roundup_pow_of_two(*pbe_size);
2880 
2881 	/* find the smallest PBE size that we can have */
2882 	for (i = 0; i < buf_cnt; i++) {
		/* the first address may not be page aligned, so skip the
		 * check for it.
		 */
2884 		if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) ||
2885 				 (buf_list[i].size & ~PAGE_MASK))) {
2886 			return 0;
2887 		}
2888 
		/* if the PBE size chosen so far is greater than this
		 * buffer's, reduce the PBE size.
		 */
2892 		buf_size = roundup(buf_list[i].size, PAGE_SIZE);
		/* pbe_size has to be a power-of-two multiple of 4K: 4K, 8K, 16K, ... */
2894 		buf_size = roundup_pow_of_two(buf_size);
2895 		if (*pbe_size > buf_size)
2896 			*pbe_size = buf_size;
2897 
2898 		total_size += buf_size;
2899 	}
2900 	*pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ?
2901 	    (MAX_KERNEL_PBE_SIZE) : (*pbe_size);
2902 
2903 	/* num_pbes = total_size / (*pbe_size);  this is implemented below. */
2904 
2905 	return total_size >> ilog2(*pbe_size);
2906 }
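
/*
 * Worked example (illustrative): for two page-aligned buffers of 16K and 8K
 * with PAGE_SIZE = 4K, pbe_size starts at 16K, is reduced to 8K by the second
 * buffer, and total_size = 16K + 8K = 24K, so the function returns
 * 24K >> ilog2(8K) = 3 PBEs.
 */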
2907 
2908 static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
2909 			      u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
2910 			      struct ocrdma_hw_mr *hwmr)
2911 {
2912 	int i;
2913 	int idx;
2914 	int pbes_per_buf = 0;
2915 	u64 buf_addr = 0;
2916 	int num_pbes;
2917 	struct ocrdma_pbe *pbe;
2918 	int total_num_pbes = 0;
2919 
2920 	if (!hwmr->num_pbes)
2921 		return;
2922 
2923 	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2924 	num_pbes = 0;
2925 
2926 	/* go through the OS phy regions & fill hw pbe entries into pbls. */
2927 	for (i = 0; i < ib_buf_cnt; i++) {
2928 		buf_addr = buf_list[i].addr;
2929 		pbes_per_buf =
2930 		    roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
2931 		    pbe_size;
2932 		hwmr->len += buf_list[i].size;
2933 		/* number of pbes can be more for one OS buf, when
2934 		 * buffers are of different sizes.
2935 		 * split the ib_buf to one or more pbes.
2936 		 */
2937 		for (idx = 0; idx < pbes_per_buf; idx++) {
			/* always program page-aligned addresses; any offset
			 * into the first buffer is covered by the FBO.
			 */
			if (i == 0) {
				/* for a non-zero FBO, program the start
				 * of the page.
				 */
2945 				pbe->pa_lo =
2946 				    cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2947 				pbe->pa_hi =
2948 				    cpu_to_le32((u32) upper_32_bits(buf_addr));
2949 			} else {
2950 				pbe->pa_lo =
2951 				    cpu_to_le32((u32) (buf_addr & 0xffffffff));
2952 				pbe->pa_hi =
2953 				    cpu_to_le32((u32) upper_32_bits(buf_addr));
2954 			}
2955 			buf_addr += pbe_size;
2956 			num_pbes += 1;
2957 			total_num_pbes += 1;
2958 			pbe++;
2959 
2960 			if (total_num_pbes == hwmr->num_pbes)
2961 				goto mr_tbl_done;
2962 			/* if the pbl is full storing the pbes,
2963 			 * move to next pbl.
2964 			 */
2965 			if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
2966 				pbl_tbl++;
2967 				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2968 				num_pbes = 0;
2969 			}
2970 		}
2971 	}
2972 mr_tbl_done:
2973 	return;
2974 }
2975 
2976 struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
2977 				   struct ib_phys_buf *buf_list,
2978 				   int buf_cnt, int acc, u64 *iova_start)
2979 {
2980 	int status = -ENOMEM;
2981 	struct ocrdma_mr *mr;
2982 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
2983 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
2984 	u32 num_pbes;
2985 	u32 pbe_size = 0;
2986 
2987 	if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
2988 		return ERR_PTR(-EINVAL);
2989 
2990 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2991 	if (!mr)
2992 		return ERR_PTR(status);
2993 
2994 	num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
2995 	if (num_pbes == 0) {
2996 		status = -EINVAL;
2997 		goto pbl_err;
2998 	}
2999 	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
3000 	if (status)
3001 		goto pbl_err;
3002 
3003 	mr->hwmr.pbe_size = pbe_size;
3004 	mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
3005 	mr->hwmr.va = *iova_start;
3006 	mr->hwmr.local_rd = 1;
3007 	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3008 	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3009 	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3010 	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3011 	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
3012 
3013 	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
3014 	if (status)
3015 		goto pbl_err;
3016 	build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table,
3017 			  &mr->hwmr);
3018 	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
3019 	if (status)
3020 		goto mbx_err;
3021 
3022 	mr->ibmr.lkey = mr->hwmr.lkey;
3023 	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
3024 		mr->ibmr.rkey = mr->hwmr.lkey;
3025 	return &mr->ibmr;
3026 
3027 mbx_err:
3028 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
3029 pbl_err:
3030 	kfree(mr);
3031 	return ERR_PTR(status);
3032 }
3033