1 /*******************************************************************
2  * This file is part of the Emulex RoCE Device Driver for          *
3  * RoCE (RDMA over Converged Ethernet) adapters.                   *
4  * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  *                                                                 *
8  * This program is free software; you can redistribute it and/or   *
9  * modify it under the terms of version 2 of the GNU General       *
10  * Public License as published by the Free Software Foundation.    *
11  * This program is distributed in the hope that it will be useful. *
12  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
13  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
14  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
15  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
17  * more details, a copy of which can be found in the file COPYING  *
18  * included with this package.                                     *
19  *
20  * Contact Information:
21  * linux-drivers@emulex.com
22  *
23  * Emulex
24  * 3333 Susan Street
25  * Costa Mesa, CA 92626
26  *******************************************************************/
27 
28 #include <linux/dma-mapping.h>
29 #include <rdma/ib_verbs.h>
30 #include <rdma/ib_user_verbs.h>
31 #include <rdma/iw_cm.h>
32 #include <rdma/ib_umem.h>
33 #include <rdma/ib_addr.h>
34 
35 #include "ocrdma.h"
36 #include "ocrdma_hw.h"
37 #include "ocrdma_verbs.h"
38 #include "ocrdma_abi.h"
39 
40 int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
41 {
42 	if (index > 1)
43 		return -EINVAL;
44 
45 	*pkey = 0xffff;
46 	return 0;
47 }
48 
49 int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
50 		     int index, union ib_gid *sgid)
51 {
52 	struct ocrdma_dev *dev;
53 
54 	dev = get_ocrdma_dev(ibdev);
55 	memset(sgid, 0, sizeof(*sgid));
56 	if (index >= OCRDMA_MAX_SGID)
57 		return -EINVAL;
58 
59 	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
60 
61 	return 0;
62 }
63 
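/* Report device capabilities. Most limits come straight from the
 * mailbox-queried device attributes in dev->attr; features the hardware
 * does not support (FMRs, atomics) are reported as zero.
 */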
64 int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
65 {
66 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
67 
68 	memset(attr, 0, sizeof *attr);
69 	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
70 	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
71 	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
72 	attr->max_mr_size = dev->attr.max_mr_size;
73 	attr->page_size_cap = 0xffff000;
74 	attr->vendor_id = dev->nic_info.pdev->vendor;
75 	attr->vendor_part_id = dev->nic_info.pdev->device;
76 	attr->hw_ver = dev->asic_id;
77 	attr->max_qp = dev->attr.max_qp;
78 	attr->max_ah = OCRDMA_MAX_AH;
79 	attr->max_qp_wr = dev->attr.max_wqe;
80 
81 	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
82 					IB_DEVICE_RC_RNR_NAK_GEN |
83 					IB_DEVICE_SHUTDOWN_PORT |
84 					IB_DEVICE_SYS_IMAGE_GUID |
85 					IB_DEVICE_LOCAL_DMA_LKEY |
86 					IB_DEVICE_MEM_MGT_EXTENSIONS;
87 	attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
88 	attr->max_sge_rd = 0;
89 	attr->max_cq = dev->attr.max_cq;
90 	attr->max_cqe = dev->attr.max_cqe;
91 	attr->max_mr = dev->attr.max_mr;
92 	attr->max_mw = dev->attr.max_mw;
93 	attr->max_pd = dev->attr.max_pd;
94 	attr->atomic_cap = 0;
95 	attr->max_fmr = 0;
96 	attr->max_map_per_fmr = 0;
97 	attr->max_qp_rd_atom =
98 	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
99 	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
100 	attr->max_srq = dev->attr.max_srq;
101 	attr->max_srq_sge = dev->attr.max_srq_sge;
102 	attr->max_srq_wr = dev->attr.max_rqe;
103 	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
104 	attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
105 	attr->max_pkeys = 1;
106 	return 0;
107 }
108 
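/* Map the physical link speed reported by firmware to the closest
 * (IB speed, IB width) pair, e.g. 10 Gbps is reported as QDR x1 and
 * 40 Gbps as QDR x4. If the mailbox query fails, fall back to the
 * minimum of SDR x1.
 */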
109 static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
110 					    u8 *ib_speed, u8 *ib_width)
111 {
112 	int status;
113 	u8 speed;
114 
115 	status = ocrdma_mbx_get_link_speed(dev, &speed);
116 	if (status)
117 		speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
118 
119 	switch (speed) {
120 	case OCRDMA_PHYS_LINK_SPEED_1GBPS:
121 		*ib_speed = IB_SPEED_SDR;
122 		*ib_width = IB_WIDTH_1X;
123 		break;
124 
125 	case OCRDMA_PHYS_LINK_SPEED_10GBPS:
126 		*ib_speed = IB_SPEED_QDR;
127 		*ib_width = IB_WIDTH_1X;
128 		break;
129 
130 	case OCRDMA_PHYS_LINK_SPEED_20GBPS:
131 		*ib_speed = IB_SPEED_DDR;
132 		*ib_width = IB_WIDTH_4X;
133 		break;
134 
135 	case OCRDMA_PHYS_LINK_SPEED_40GBPS:
136 		*ib_speed = IB_SPEED_QDR;
137 		*ib_width = IB_WIDTH_4X;
138 		break;
139 
140 	default:
141 		/* Unsupported */
142 		*ib_speed = IB_SPEED_SDR;
143 		*ib_width = IB_WIDTH_1X;
144 	}
145 }
146 
147 int ocrdma_query_port(struct ib_device *ibdev,
148 		      u8 port, struct ib_port_attr *props)
149 {
150 	enum ib_port_state port_state;
151 	struct ocrdma_dev *dev;
152 	struct net_device *netdev;
153 
154 	dev = get_ocrdma_dev(ibdev);
155 	if (port > 1) {
156 		pr_err("%s(%d) invalid_port=0x%x\n", __func__,
157 		       dev->id, port);
158 		return -EINVAL;
159 	}
160 	netdev = dev->nic_info.netdev;
161 	if (netif_running(netdev) && netif_oper_up(netdev)) {
162 		port_state = IB_PORT_ACTIVE;
163 		props->phys_state = 5;
164 	} else {
165 		port_state = IB_PORT_DOWN;
166 		props->phys_state = 3;
167 	}
168 	props->max_mtu = IB_MTU_4096;
169 	props->active_mtu = iboe_get_mtu(netdev->mtu);
170 	props->lid = 0;
171 	props->lmc = 0;
172 	props->sm_lid = 0;
173 	props->sm_sl = 0;
174 	props->state = port_state;
175 	props->port_cap_flags =
176 	    IB_PORT_CM_SUP |
177 	    IB_PORT_REINIT_SUP |
178 	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS;
179 	props->gid_tbl_len = OCRDMA_MAX_SGID;
180 	props->pkey_tbl_len = 1;
181 	props->bad_pkey_cntr = 0;
182 	props->qkey_viol_cntr = 0;
183 	get_link_speed_and_width(dev, &props->active_speed,
184 				 &props->active_width);
185 	props->max_msg_sz = 0x80000000;
186 	props->max_vl_num = 4;
187 	return 0;
188 }
189 
190 int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
191 		       struct ib_port_modify *props)
192 {
193 	struct ocrdma_dev *dev;
194 
195 	dev = get_ocrdma_dev(ibdev);
196 	if (port > 1) {
197 		pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
198 		return -EINVAL;
199 	}
200 	return 0;
201 }
202 
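/* Every (physical address, length) region that user space is allowed to
 * mmap() - doorbell pages, DPP pages, queue memory, the AH table - is
 * recorded on the per-ucontext mm_head list; ocrdma_mmap() validates
 * incoming requests against these entries.
 */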
203 static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
204 			   unsigned long len)
205 {
206 	struct ocrdma_mm *mm;
207 
208 	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
209 	if (mm == NULL)
210 		return -ENOMEM;
211 	mm->key.phy_addr = phy_addr;
212 	mm->key.len = len;
213 	INIT_LIST_HEAD(&mm->entry);
214 
215 	mutex_lock(&uctx->mm_list_lock);
216 	list_add_tail(&mm->entry, &uctx->mm_head);
217 	mutex_unlock(&uctx->mm_list_lock);
218 	return 0;
219 }
220 
221 static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
222 			    unsigned long len)
223 {
224 	struct ocrdma_mm *mm, *tmp;
225 
226 	mutex_lock(&uctx->mm_list_lock);
227 	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
228 		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
229 			continue;
230 
231 		list_del(&mm->entry);
232 		kfree(mm);
233 		break;
234 	}
235 	mutex_unlock(&uctx->mm_list_lock);
236 }
237 
238 static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
239 			      unsigned long len)
240 {
241 	bool found = false;
242 	struct ocrdma_mm *mm;
243 
244 	mutex_lock(&uctx->mm_list_lock);
245 	list_for_each_entry(mm, &uctx->mm_head, entry) {
246 		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
247 			continue;
248 
249 		found = true;
250 		break;
251 	}
252 	mutex_unlock(&uctx->mm_list_lock);
253 	return found;
254 }
255 
256 
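/* When the firmware pre-allocates PDs (pd_prealloc_valid), PD ids are
 * handed out from two bitmaps: one for DPP-capable PDs and one for
 * normal PDs. The *_thrsh fields track the high-water mark of PDs in
 * use, presumably for the driver's resource statistics.
 */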
257 static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
258 {
259 	u16 pd_bitmap_idx = 0;
260 	const unsigned long *pd_bitmap;
261 
262 	if (dpp_pool) {
263 		pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
264 		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
265 						    dev->pd_mgr->max_dpp_pd);
266 		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
267 		dev->pd_mgr->pd_dpp_count++;
268 		if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
269 			dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
270 	} else {
271 		pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
272 		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
273 						    dev->pd_mgr->max_normal_pd);
274 		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
275 		dev->pd_mgr->pd_norm_count++;
276 		if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
277 			dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
278 	}
279 	return pd_bitmap_idx;
280 }
281 
282 static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
283 					bool dpp_pool)
284 {
285 	u16 pd_count;
286 	u16 pd_bit_index;
287 
288 	pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
289 			      dev->pd_mgr->pd_norm_count;
290 	if (pd_count == 0)
291 		return -EINVAL;
292 
293 	if (dpp_pool) {
294 		pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
295 		if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
296 			return -EINVAL;
297 		} else {
298 			__clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
299 			dev->pd_mgr->pd_dpp_count--;
300 		}
301 	} else {
302 		pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
303 		if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
304 			return -EINVAL;
305 		} else {
306 			__clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
307 			dev->pd_mgr->pd_norm_count--;
308 		}
309 	}
310 
311 	return 0;
312 }
313 
static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
315 				   bool dpp_pool)
316 {
317 	int status;
318 
319 	mutex_lock(&dev->dev_lock);
320 	status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
321 	mutex_unlock(&dev->dev_lock);
322 	return status;
323 }
324 
325 static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
326 {
327 	u16 pd_idx = 0;
328 	int status = 0;
329 
330 	mutex_lock(&dev->dev_lock);
331 	if (pd->dpp_enabled) {
332 		/* try allocating DPP PD, if not available then normal PD */
333 		if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
334 			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
335 			pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
336 			pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
337 		} else if (dev->pd_mgr->pd_norm_count <
338 			   dev->pd_mgr->max_normal_pd) {
339 			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
340 			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
341 			pd->dpp_enabled = false;
342 		} else {
343 			status = -EINVAL;
344 		}
345 	} else {
346 		if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
347 			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
348 			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
349 		} else {
350 			status = -EINVAL;
351 		}
352 	}
353 	mutex_unlock(&dev->dev_lock);
354 	return status;
355 }
356 
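/* Allocate a protection domain. For user contexts on a SkyHawk-R class
 * ASIC a DPP (direct packet push) capable PD is attempted first; if the
 * firmware rejects it, the allocation is retried without DPP. When the
 * pre-allocated PD pool is valid, the PD id comes from the bitmap
 * instead of a mailbox command.
 */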
357 static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
358 					  struct ocrdma_ucontext *uctx,
359 					  struct ib_udata *udata)
360 {
361 	struct ocrdma_pd *pd = NULL;
362 	int status = 0;
363 
364 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
365 	if (!pd)
366 		return ERR_PTR(-ENOMEM);
367 
368 	if (udata && uctx && dev->attr.max_dpp_pds) {
369 		pd->dpp_enabled =
370 			ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
371 		pd->num_dpp_qp =
372 			pd->dpp_enabled ? (dev->nic_info.db_page_size /
373 					   dev->attr.wqe_size) : 0;
374 	}
375 
376 	if (dev->pd_mgr->pd_prealloc_valid) {
377 		status = ocrdma_get_pd_num(dev, pd);
378 		return (status == 0) ? pd : ERR_PTR(status);
379 	}
380 
381 retry:
382 	status = ocrdma_mbx_alloc_pd(dev, pd);
383 	if (status) {
384 		if (pd->dpp_enabled) {
385 			pd->dpp_enabled = false;
386 			pd->num_dpp_qp = 0;
387 			goto retry;
388 		} else {
389 			kfree(pd);
390 			return ERR_PTR(status);
391 		}
392 	}
393 
394 	return pd;
395 }
396 
397 static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
398 				 struct ocrdma_pd *pd)
399 {
400 	return (uctx->cntxt_pd == pd ? true : false);
401 }
402 
403 static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
404 			      struct ocrdma_pd *pd)
405 {
406 	int status = 0;
407 
408 	if (dev->pd_mgr->pd_prealloc_valid)
409 		status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
410 	else
411 		status = ocrdma_mbx_dealloc_pd(dev, pd);
412 
413 	kfree(pd);
414 	return status;
415 }
416 
417 static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
418 				    struct ocrdma_ucontext *uctx,
419 				    struct ib_udata *udata)
420 {
421 	int status = 0;
422 
423 	uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
424 	if (IS_ERR(uctx->cntxt_pd)) {
425 		status = PTR_ERR(uctx->cntxt_pd);
426 		uctx->cntxt_pd = NULL;
427 		goto err;
428 	}
429 
430 	uctx->cntxt_pd->uctx = uctx;
431 	uctx->cntxt_pd->ibpd.device = &dev->ibdev;
432 err:
433 	return status;
434 }
435 
436 static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
437 {
438 	struct ocrdma_pd *pd = uctx->cntxt_pd;
439 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
440 
441 	if (uctx->pd_in_use) {
442 		pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
443 		       __func__, dev->id, pd->id);
444 	}
445 	uctx->cntxt_pd = NULL;
446 	(void)_ocrdma_dealloc_pd(dev, pd);
447 	return 0;
448 }
449 
450 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
451 {
452 	struct ocrdma_pd *pd = NULL;
453 
454 	mutex_lock(&uctx->mm_list_lock);
455 	if (!uctx->pd_in_use) {
456 		uctx->pd_in_use = true;
457 		pd = uctx->cntxt_pd;
458 	}
459 	mutex_unlock(&uctx->mm_list_lock);
460 
461 	return pd;
462 }
463 
464 static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
465 {
466 	mutex_lock(&uctx->mm_list_lock);
467 	uctx->pd_in_use = false;
468 	mutex_unlock(&uctx->mm_list_lock);
469 }
470 
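/* Create a user context: allocate a DMA-coherent address handle (AH)
 * table that is shared with user space, register it as an mmap target,
 * set up the per-context PD and copy the device limits (queue entry
 * sizes, inline data limit, firmware version) back to the user library.
 */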
471 struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
472 					  struct ib_udata *udata)
473 {
474 	int status;
475 	struct ocrdma_ucontext *ctx;
476 	struct ocrdma_alloc_ucontext_resp resp;
477 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
478 	struct pci_dev *pdev = dev->nic_info.pdev;
479 	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
480 
481 	if (!udata)
482 		return ERR_PTR(-EFAULT);
483 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
484 	if (!ctx)
485 		return ERR_PTR(-ENOMEM);
486 	INIT_LIST_HEAD(&ctx->mm_head);
487 	mutex_init(&ctx->mm_list_lock);
488 
489 	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
490 					    &ctx->ah_tbl.pa, GFP_KERNEL);
491 	if (!ctx->ah_tbl.va) {
492 		kfree(ctx);
493 		return ERR_PTR(-ENOMEM);
494 	}
495 	memset(ctx->ah_tbl.va, 0, map_len);
496 	ctx->ah_tbl.len = map_len;
497 
498 	memset(&resp, 0, sizeof(resp));
499 	resp.ah_tbl_len = ctx->ah_tbl.len;
500 	resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
501 
502 	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
503 	if (status)
504 		goto map_err;
505 
506 	status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
507 	if (status)
508 		goto pd_err;
509 
510 	resp.dev_id = dev->id;
511 	resp.max_inline_data = dev->attr.max_inline_data;
512 	resp.wqe_size = dev->attr.wqe_size;
513 	resp.rqe_size = dev->attr.rqe_size;
514 	resp.dpp_wqe_size = dev->attr.wqe_size;
515 
516 	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
517 	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
518 	if (status)
519 		goto cpy_err;
520 	return &ctx->ibucontext;
521 
522 cpy_err:
523 pd_err:
524 	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
525 map_err:
526 	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
527 			  ctx->ah_tbl.pa);
528 	kfree(ctx);
529 	return ERR_PTR(status);
530 }
531 
532 int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
533 {
534 	int status = 0;
535 	struct ocrdma_mm *mm, *tmp;
536 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
537 	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
538 	struct pci_dev *pdev = dev->nic_info.pdev;
539 
540 	status = ocrdma_dealloc_ucontext_pd(uctx);
541 
542 	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
543 	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
544 			  uctx->ah_tbl.pa);
545 
546 	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
547 		list_del(&mm->entry);
548 		kfree(mm);
549 	}
550 	kfree(uctx);
551 	return status;
552 }
553 
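/* mmap handler for user space. The requested (offset, length) must have
 * been registered earlier via ocrdma_add_mmap(). Doorbell pages are
 * mapped non-cached, DPP regions write-combined, and driver-allocated
 * queue memory with the default protection.
 */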
554 int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
555 {
556 	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
557 	struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
558 	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
559 	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
560 	unsigned long len = (vma->vm_end - vma->vm_start);
561 	int status = 0;
562 	bool found;
563 
564 	if (vma->vm_start & (PAGE_SIZE - 1))
565 		return -EINVAL;
566 	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
567 	if (!found)
568 		return -EINVAL;
569 
570 	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
571 		dev->nic_info.db_total_size)) &&
572 		(len <=	dev->nic_info.db_page_size)) {
573 		if (vma->vm_flags & VM_READ)
574 			return -EPERM;
575 
576 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
577 		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
578 					    len, vma->vm_page_prot);
579 	} else if (dev->nic_info.dpp_unmapped_len &&
580 		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
581 		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
582 			dev->nic_info.dpp_unmapped_len)) &&
583 		(len <= dev->nic_info.dpp_unmapped_len)) {
584 		if (vma->vm_flags & VM_READ)
585 			return -EPERM;
586 
587 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
588 		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
589 					    len, vma->vm_page_prot);
590 	} else {
591 		status = remap_pfn_range(vma, vma->vm_start,
592 					 vma->vm_pgoff, len, vma->vm_page_prot);
593 	}
594 	return status;
595 }
596 
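/* Return the PD id, doorbell page and, when DPP is enabled, the DPP page
 * to user space, registering those pages as legal mmap targets first.
 */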
597 static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
598 				struct ib_ucontext *ib_ctx,
599 				struct ib_udata *udata)
600 {
601 	int status;
602 	u64 db_page_addr;
603 	u64 dpp_page_addr = 0;
604 	u32 db_page_size;
605 	struct ocrdma_alloc_pd_uresp rsp;
606 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
607 
608 	memset(&rsp, 0, sizeof(rsp));
609 	rsp.id = pd->id;
610 	rsp.dpp_enabled = pd->dpp_enabled;
611 	db_page_addr = ocrdma_get_db_addr(dev, pd->id);
612 	db_page_size = dev->nic_info.db_page_size;
613 
614 	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
615 	if (status)
616 		return status;
617 
618 	if (pd->dpp_enabled) {
619 		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
620 				(pd->id * PAGE_SIZE);
621 		status = ocrdma_add_mmap(uctx, dpp_page_addr,
622 				 PAGE_SIZE);
623 		if (status)
624 			goto dpp_map_err;
625 		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
626 		rsp.dpp_page_addr_lo = dpp_page_addr;
627 	}
628 
629 	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
630 	if (status)
631 		goto ucopy_err;
632 
633 	pd->uctx = uctx;
634 	return 0;
635 
636 ucopy_err:
637 	if (pd->dpp_enabled)
638 		ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
639 dpp_map_err:
640 	ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
641 	return status;
642 }
643 
644 struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
645 			      struct ib_ucontext *context,
646 			      struct ib_udata *udata)
647 {
648 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
649 	struct ocrdma_pd *pd;
650 	struct ocrdma_ucontext *uctx = NULL;
651 	int status;
652 	u8 is_uctx_pd = false;
653 
654 	if (udata && context) {
655 		uctx = get_ocrdma_ucontext(context);
656 		pd = ocrdma_get_ucontext_pd(uctx);
657 		if (pd) {
658 			is_uctx_pd = true;
659 			goto pd_mapping;
660 		}
661 	}
662 
663 	pd = _ocrdma_alloc_pd(dev, uctx, udata);
664 	if (IS_ERR(pd)) {
665 		status = PTR_ERR(pd);
666 		goto exit;
667 	}
668 
669 pd_mapping:
670 	if (udata && context) {
671 		status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
672 		if (status)
673 			goto err;
674 	}
675 	return &pd->ibpd;
676 
677 err:
678 	if (is_uctx_pd) {
679 		ocrdma_release_ucontext_pd(uctx);
680 	} else {
		/* _ocrdma_dealloc_pd() frees the pd itself */
		status = _ocrdma_dealloc_pd(dev, pd);
683 	}
684 exit:
685 	return ERR_PTR(status);
686 }
687 
688 int ocrdma_dealloc_pd(struct ib_pd *ibpd)
689 {
690 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
691 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
692 	struct ocrdma_ucontext *uctx = NULL;
693 	int status = 0;
694 	u64 usr_db;
695 
696 	uctx = pd->uctx;
697 	if (uctx) {
698 		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
699 			(pd->id * PAGE_SIZE);
700 		if (pd->dpp_enabled)
701 			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
702 		usr_db = ocrdma_get_db_addr(dev, pd->id);
703 		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
704 
705 		if (is_ucontext_pd(uctx, pd)) {
706 			ocrdma_release_ucontext_pd(uctx);
707 			return status;
708 		}
709 	}
710 	status = _ocrdma_dealloc_pd(dev, pd);
711 	return status;
712 }
713 
714 static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
715 			    u32 pdid, int acc, u32 num_pbls, u32 addr_check)
716 {
717 	int status;
718 
719 	mr->hwmr.fr_mr = 0;
720 	mr->hwmr.local_rd = 1;
721 	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
722 	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
723 	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
724 	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
725 	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
726 	mr->hwmr.num_pbls = num_pbls;
727 
728 	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
729 	if (status)
730 		return status;
731 
732 	mr->ibmr.lkey = mr->hwmr.lkey;
733 	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
734 		mr->ibmr.rkey = mr->hwmr.lkey;
735 	return 0;
736 }
737 
738 struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
739 {
740 	int status;
741 	struct ocrdma_mr *mr;
742 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
743 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
744 
745 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
746 		pr_err("%s err, invalid access rights\n", __func__);
747 		return ERR_PTR(-EINVAL);
748 	}
749 
750 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
751 	if (!mr)
752 		return ERR_PTR(-ENOMEM);
753 
754 	status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
755 				   OCRDMA_ADDR_CHECK_DISABLE);
756 	if (status) {
757 		kfree(mr);
758 		return ERR_PTR(status);
759 	}
760 
761 	return &mr->ibmr;
762 }
763 
764 static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
765 				   struct ocrdma_hw_mr *mr)
766 {
767 	struct pci_dev *pdev = dev->nic_info.pdev;
768 	int i = 0;
769 
770 	if (mr->pbl_table) {
771 		for (i = 0; i < mr->num_pbls; i++) {
772 			if (!mr->pbl_table[i].va)
773 				continue;
774 			dma_free_coherent(&pdev->dev, mr->pbl_size,
775 					  mr->pbl_table[i].va,
776 					  mr->pbl_table[i].pa);
777 		}
778 		kfree(mr->pbl_table);
779 		mr->pbl_table = NULL;
780 	}
781 }
782 
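/* Pick the PBL (physical buffer list) geometry for num_pbes page entries:
 * start with the minimum hardware page size and keep doubling pbl_size
 * until the number of PBLs required drops below the device limit
 * (max_num_mr_pbl) or the maximum PBL size is exceeded.
 */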
783 static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
784 			      u32 num_pbes)
785 {
786 	u32 num_pbls = 0;
787 	u32 idx = 0;
788 	int status = 0;
789 	u32 pbl_size;
790 
791 	do {
792 		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
793 		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
794 			status = -EFAULT;
795 			break;
796 		}
797 		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
798 		num_pbls = num_pbls / (pbl_size / sizeof(u64));
799 		idx++;
800 	} while (num_pbls >= dev->attr.max_num_mr_pbl);
801 
802 	mr->hwmr.num_pbes = num_pbes;
803 	mr->hwmr.num_pbls = num_pbls;
804 	mr->hwmr.pbl_size = pbl_size;
805 	return status;
806 }
807 
808 static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
809 {
810 	int status = 0;
811 	int i;
812 	u32 dma_len = mr->pbl_size;
813 	struct pci_dev *pdev = dev->nic_info.pdev;
814 	void *va;
815 	dma_addr_t pa;
816 
817 	mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
818 				mr->num_pbls, GFP_KERNEL);
819 
820 	if (!mr->pbl_table)
821 		return -ENOMEM;
822 
823 	for (i = 0; i < mr->num_pbls; i++) {
824 		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
825 		if (!va) {
826 			ocrdma_free_mr_pbl_tbl(dev, mr);
827 			status = -ENOMEM;
828 			break;
829 		}
830 		memset(va, 0, dma_len);
831 		mr->pbl_table[i].va = va;
832 		mr->pbl_table[i].pa = pa;
833 	}
834 	return status;
835 }
836 
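/* Walk the umem scatter-gather list and write one 64-bit PBE (physical
 * buffer entry) per registered page into the PBL pages, moving on to the
 * next PBL whenever the current one fills up.
 */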
837 static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
838 			    u32 num_pbes)
839 {
840 	struct ocrdma_pbe *pbe;
841 	struct scatterlist *sg;
842 	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
843 	struct ib_umem *umem = mr->umem;
844 	int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;
845 
846 	if (!mr->hwmr.num_pbes)
847 		return;
848 
849 	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
850 	pbe_cnt = 0;
851 
852 	shift = ilog2(umem->page_size);
853 
854 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
855 		pages = sg_dma_len(sg) >> shift;
856 		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
857 			/* store the page address in pbe */
			pbe->pa_lo =
			    cpu_to_le32(sg_dma_address(sg) +
					(umem->page_size * pg_cnt));
			pbe->pa_hi =
			    cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
					(umem->page_size * pg_cnt)));
867 			pbe_cnt += 1;
868 			total_num_pbes += 1;
869 			pbe++;
870 
871 			/* if done building pbes, issue the mbx cmd. */
872 			if (total_num_pbes == num_pbes)
873 				return;
874 
			/* if the given pbl is full of stored pbes,
			 * move on to the next pbl.
			 */
878 			if (pbe_cnt ==
879 				(mr->hwmr.pbl_size / sizeof(u64))) {
880 				pbl_tbl++;
881 				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
882 				pbe_cnt = 0;
883 			}
884 
885 		}
886 	}
887 }
888 
889 struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
890 				 u64 usr_addr, int acc, struct ib_udata *udata)
891 {
892 	int status = -ENOMEM;
893 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
894 	struct ocrdma_mr *mr;
895 	struct ocrdma_pd *pd;
896 	u32 num_pbes;
897 
898 	pd = get_ocrdma_pd(ibpd);
899 
900 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
901 		return ERR_PTR(-EINVAL);
902 
903 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
904 	if (!mr)
905 		return ERR_PTR(status);
906 	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
907 	if (IS_ERR(mr->umem)) {
908 		status = -EFAULT;
909 		goto umem_err;
910 	}
911 	num_pbes = ib_umem_page_count(mr->umem);
912 	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
913 	if (status)
914 		goto umem_err;
915 
916 	mr->hwmr.pbe_size = mr->umem->page_size;
917 	mr->hwmr.fbo = ib_umem_offset(mr->umem);
918 	mr->hwmr.va = usr_addr;
919 	mr->hwmr.len = len;
920 	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
921 	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
922 	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
923 	mr->hwmr.local_rd = 1;
924 	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
925 	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
926 	if (status)
927 		goto umem_err;
928 	build_user_pbes(dev, mr, num_pbes);
929 	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
930 	if (status)
931 		goto mbx_err;
932 	mr->ibmr.lkey = mr->hwmr.lkey;
933 	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
934 		mr->ibmr.rkey = mr->hwmr.lkey;
935 
936 	return &mr->ibmr;
937 
938 mbx_err:
939 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
940 umem_err:
941 	kfree(mr);
942 	return ERR_PTR(status);
943 }
944 
945 int ocrdma_dereg_mr(struct ib_mr *ib_mr)
946 {
947 	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
948 	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
949 
950 	(void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
951 
952 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
953 
954 	/* it could be user registered memory. */
955 	if (mr->umem)
956 		ib_umem_release(mr->umem);
957 	kfree(mr);
958 
959 	/* Don't stop cleanup, in case FW is unresponsive */
960 	if (dev->mqe_ctx.fw_error_state) {
961 		pr_err("%s(%d) fw not responding.\n",
962 		       __func__, dev->id);
963 	}
964 	return 0;
965 }
966 
967 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
968 				struct ib_udata *udata,
969 				struct ib_ucontext *ib_ctx)
970 {
971 	int status;
972 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
973 	struct ocrdma_create_cq_uresp uresp;
974 
975 	memset(&uresp, 0, sizeof(uresp));
976 	uresp.cq_id = cq->id;
977 	uresp.page_size = PAGE_ALIGN(cq->len);
978 	uresp.num_pages = 1;
979 	uresp.max_hw_cqe = cq->max_hw_cqe;
980 	uresp.page_addr[0] = virt_to_phys(cq->va);
981 	uresp.db_page_addr =  ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
982 	uresp.db_page_size = dev->nic_info.db_page_size;
983 	uresp.phase_change = cq->phase_change ? 1 : 0;
984 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
985 	if (status) {
986 		pr_err("%s(%d) copy error cqid=0x%x.\n",
987 		       __func__, dev->id, cq->id);
988 		goto err;
989 	}
990 	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
991 	if (status)
992 		goto err;
993 	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
994 	if (status) {
995 		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
996 		goto err;
997 	}
998 	cq->ucontext = uctx;
999 err:
1000 	return status;
1001 }
1002 
1003 struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
1004 			       struct ib_ucontext *ib_ctx,
1005 			       struct ib_udata *udata)
1006 {
1007 	struct ocrdma_cq *cq;
1008 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
1009 	struct ocrdma_ucontext *uctx = NULL;
1010 	u16 pd_id = 0;
1011 	int status;
1012 	struct ocrdma_create_cq_ureq ureq;
1013 
1014 	if (udata) {
1015 		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1016 			return ERR_PTR(-EFAULT);
1017 	} else
1018 		ureq.dpp_cq = 0;
1019 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
1020 	if (!cq)
1021 		return ERR_PTR(-ENOMEM);
1022 
1023 	spin_lock_init(&cq->cq_lock);
1024 	spin_lock_init(&cq->comp_handler_lock);
1025 	INIT_LIST_HEAD(&cq->sq_head);
1026 	INIT_LIST_HEAD(&cq->rq_head);
1027 	cq->first_arm = true;
1028 
1029 	if (ib_ctx) {
1030 		uctx = get_ocrdma_ucontext(ib_ctx);
1031 		pd_id = uctx->cntxt_pd->id;
1032 	}
1033 
1034 	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
1035 	if (status) {
1036 		kfree(cq);
1037 		return ERR_PTR(status);
1038 	}
1039 	if (ib_ctx) {
1040 		status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
1041 		if (status)
1042 			goto ctx_err;
1043 	}
1044 	cq->phase = OCRDMA_CQE_VALID;
1045 	dev->cq_tbl[cq->id] = cq;
1046 	return &cq->ibcq;
1047 
1048 ctx_err:
1049 	ocrdma_mbx_destroy_cq(dev, cq);
1050 	kfree(cq);
1051 	return ERR_PTR(status);
1052 }
1053 
1054 int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
1055 		     struct ib_udata *udata)
1056 {
1057 	int status = 0;
1058 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1059 
1060 	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
1061 		status = -EINVAL;
1062 		return status;
1063 	}
1064 	ibcq->cqe = new_cnt;
1065 	return status;
1066 }
1067 
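/* Count the CQEs still marked valid and ring the CQ doorbell for them,
 * presumably so the hardware's consumed-entry accounting stays consistent
 * before the CQ is destroyed (a "hard flush" of anything the poll path
 * never reaped).
 */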
1068 static void ocrdma_flush_cq(struct ocrdma_cq *cq)
1069 {
1070 	int cqe_cnt;
1071 	int valid_count = 0;
1072 	unsigned long flags;
1073 
1074 	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
1075 	struct ocrdma_cqe *cqe = NULL;
1076 
1077 	cqe = cq->va;
1078 	cqe_cnt = cq->cqe_cnt;
1079 
	/* The last irq might have scheduled a polling thread;
	 * sync up with it before hard flushing.
	 */
1083 	spin_lock_irqsave(&cq->cq_lock, flags);
1084 	while (cqe_cnt) {
1085 		if (is_cqe_valid(cq, cqe))
1086 			valid_count++;
1087 		cqe++;
1088 		cqe_cnt--;
1089 	}
1090 	ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
1091 	spin_unlock_irqrestore(&cq->cq_lock, flags);
1092 }
1093 
1094 int ocrdma_destroy_cq(struct ib_cq *ibcq)
1095 {
1096 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1097 	struct ocrdma_eq *eq = NULL;
1098 	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
1099 	int pdid = 0;
1100 	u32 irq, indx;
1101 
1102 	dev->cq_tbl[cq->id] = NULL;
1103 	indx = ocrdma_get_eq_table_index(dev, cq->eqn);
1104 	if (indx == -EINVAL)
1105 		BUG();
1106 
1107 	eq = &dev->eq_tbl[indx];
1108 	irq = ocrdma_get_irq(dev, eq);
1109 	synchronize_irq(irq);
1110 	ocrdma_flush_cq(cq);
1111 
1112 	(void)ocrdma_mbx_destroy_cq(dev, cq);
1113 	if (cq->ucontext) {
1114 		pdid = cq->ucontext->cntxt_pd->id;
1115 		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
1116 				PAGE_ALIGN(cq->len));
1117 		ocrdma_del_mmap(cq->ucontext,
1118 				ocrdma_get_db_addr(dev, pdid),
1119 				dev->nic_info.db_page_size);
1120 	}
1121 
1122 	kfree(cq);
1123 	return 0;
1124 }
1125 
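/* qp_tbl maps a hardware QP id to its driver object so that CQE
 * processing (and, presumably, async event handling) can locate the QP;
 * the entry is cleared under the CQ locks during QP destroy.
 */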
1126 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
1127 {
1128 	int status = -EINVAL;
1129 
1130 	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
1131 		dev->qp_tbl[qp->id] = qp;
1132 		status = 0;
1133 	}
1134 	return status;
1135 }
1136 
1137 static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
1138 {
1139 	dev->qp_tbl[qp->id] = NULL;
1140 }
1141 
1142 static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
1143 				  struct ib_qp_init_attr *attrs)
1144 {
1145 	if ((attrs->qp_type != IB_QPT_GSI) &&
1146 	    (attrs->qp_type != IB_QPT_RC) &&
1147 	    (attrs->qp_type != IB_QPT_UC) &&
1148 	    (attrs->qp_type != IB_QPT_UD)) {
1149 		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
1150 		       __func__, dev->id, attrs->qp_type);
1151 		return -EINVAL;
1152 	}
1153 	/* Skip the check for QP1 to support CM size of 128 */
1154 	if ((attrs->qp_type != IB_QPT_GSI) &&
1155 	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
1156 		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
1157 		       __func__, dev->id, attrs->cap.max_send_wr);
1158 		pr_err("%s(%d) supported send_wr=0x%x\n",
1159 		       __func__, dev->id, dev->attr.max_wqe);
1160 		return -EINVAL;
1161 	}
1162 	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
1163 		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
1164 		       __func__, dev->id, attrs->cap.max_recv_wr);
1165 		pr_err("%s(%d) supported recv_wr=0x%x\n",
1166 		       __func__, dev->id, dev->attr.max_rqe);
1167 		return -EINVAL;
1168 	}
1169 	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
1170 		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
1171 		       __func__, dev->id, attrs->cap.max_inline_data);
1172 		pr_err("%s(%d) supported inline data size=0x%x\n",
1173 		       __func__, dev->id, dev->attr.max_inline_data);
1174 		return -EINVAL;
1175 	}
1176 	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
1177 		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
1178 		       __func__, dev->id, attrs->cap.max_send_sge);
1179 		pr_err("%s(%d) supported send_sge=0x%x\n",
1180 		       __func__, dev->id, dev->attr.max_send_sge);
1181 		return -EINVAL;
1182 	}
1183 	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
1184 		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
1185 		       __func__, dev->id, attrs->cap.max_recv_sge);
1186 		pr_err("%s(%d) supported recv_sge=0x%x\n",
1187 		       __func__, dev->id, dev->attr.max_recv_sge);
1188 		return -EINVAL;
1189 	}
1190 	/* unprivileged user space cannot create special QP */
1191 	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		pr_err("%s(%d) Userspace can't create special QPs of type=0x%x\n",
		       __func__, dev->id, attrs->qp_type);
1195 		return -EINVAL;
1196 	}
1197 	/* allow creating only one GSI type of QP */
1198 	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
1199 		pr_err("%s(%d) GSI special QPs already created.\n",
1200 		       __func__, dev->id);
1201 		return -EINVAL;
1202 	}
1203 	/* verify consumer QPs are not trying to use GSI QP's CQ */
1204 	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
1205 		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
1206 			(dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
1207 			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
1208 				__func__, dev->id);
1209 			return -EINVAL;
1210 		}
1211 	}
1212 	return 0;
1213 }
1214 
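/* Copy the queue ids, queue page addresses, doorbell location and DPP
 * credits back to the user library, and register the SQ (and, without an
 * SRQ, the RQ) pages as mmap targets.
 */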
1215 static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
1216 				struct ib_udata *udata, int dpp_offset,
1217 				int dpp_credit_lmt, int srq)
1218 {
1219 	int status = 0;
1220 	u64 usr_db;
1221 	struct ocrdma_create_qp_uresp uresp;
1222 	struct ocrdma_pd *pd = qp->pd;
1223 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
1224 
1225 	memset(&uresp, 0, sizeof(uresp));
1226 	usr_db = dev->nic_info.unmapped_db +
1227 			(pd->id * dev->nic_info.db_page_size);
1228 	uresp.qp_id = qp->id;
1229 	uresp.sq_dbid = qp->sq.dbid;
1230 	uresp.num_sq_pages = 1;
1231 	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
1232 	uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
1233 	uresp.num_wqe_allocated = qp->sq.max_cnt;
1234 	if (!srq) {
1235 		uresp.rq_dbid = qp->rq.dbid;
1236 		uresp.num_rq_pages = 1;
1237 		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
1238 		uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
1239 		uresp.num_rqe_allocated = qp->rq.max_cnt;
1240 	}
1241 	uresp.db_page_addr = usr_db;
1242 	uresp.db_page_size = dev->nic_info.db_page_size;
1243 	uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
1244 	uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1245 	uresp.db_shift = OCRDMA_DB_RQ_SHIFT;
1246 
1247 	if (qp->dpp_enabled) {
1248 		uresp.dpp_credit = dpp_credit_lmt;
1249 		uresp.dpp_offset = dpp_offset;
1250 	}
1251 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1252 	if (status) {
1253 		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
1254 		goto err;
1255 	}
1256 	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
1257 				 uresp.sq_page_size);
1258 	if (status)
1259 		goto err;
1260 
1261 	if (!srq) {
1262 		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
1263 					 uresp.rq_page_size);
1264 		if (status)
1265 			goto rq_map_err;
1266 	}
1267 	return status;
1268 rq_map_err:
1269 	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
1270 err:
1271 	return status;
1272 }
1273 
1274 static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
1275 			     struct ocrdma_pd *pd)
1276 {
1277 	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1278 		qp->sq_db = dev->nic_info.db +
1279 			(pd->id * dev->nic_info.db_page_size) +
1280 			OCRDMA_DB_GEN2_SQ_OFFSET;
1281 		qp->rq_db = dev->nic_info.db +
1282 			(pd->id * dev->nic_info.db_page_size) +
1283 			OCRDMA_DB_GEN2_RQ_OFFSET;
1284 	} else {
1285 		qp->sq_db = dev->nic_info.db +
1286 			(pd->id * dev->nic_info.db_page_size) +
1287 			OCRDMA_DB_SQ_OFFSET;
1288 		qp->rq_db = dev->nic_info.db +
1289 			(pd->id * dev->nic_info.db_page_size) +
1290 			OCRDMA_DB_RQ_OFFSET;
1291 	}
1292 }
1293 
1294 static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
1295 {
1296 	qp->wqe_wr_id_tbl =
1297 	    kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
1298 		    GFP_KERNEL);
1299 	if (qp->wqe_wr_id_tbl == NULL)
1300 		return -ENOMEM;
1301 	qp->rqe_wr_id_tbl =
1302 	    kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
1303 	if (qp->rqe_wr_id_tbl == NULL)
1304 		return -ENOMEM;
1305 
1306 	return 0;
1307 }
1308 
1309 static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
1310 				      struct ocrdma_pd *pd,
1311 				      struct ib_qp_init_attr *attrs)
1312 {
1313 	qp->pd = pd;
1314 	spin_lock_init(&qp->q_lock);
1315 	INIT_LIST_HEAD(&qp->sq_entry);
1316 	INIT_LIST_HEAD(&qp->rq_entry);
1317 
1318 	qp->qp_type = attrs->qp_type;
1319 	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
1320 	qp->max_inline_data = attrs->cap.max_inline_data;
1321 	qp->sq.max_sges = attrs->cap.max_send_sge;
1322 	qp->rq.max_sges = attrs->cap.max_recv_sge;
1323 	qp->state = OCRDMA_QPS_RST;
1324 	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1325 }
1326 
1327 static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
1328 				   struct ib_qp_init_attr *attrs)
1329 {
1330 	if (attrs->qp_type == IB_QPT_GSI) {
1331 		dev->gsi_qp_created = 1;
1332 		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
1333 		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
1334 	}
1335 }
1336 
1337 struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1338 			       struct ib_qp_init_attr *attrs,
1339 			       struct ib_udata *udata)
1340 {
1341 	int status;
1342 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1343 	struct ocrdma_qp *qp;
1344 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1345 	struct ocrdma_create_qp_ureq ureq;
1346 	u16 dpp_credit_lmt, dpp_offset;
1347 
1348 	status = ocrdma_check_qp_params(ibpd, dev, attrs);
1349 	if (status)
1350 		goto gen_err;
1351 
1352 	memset(&ureq, 0, sizeof(ureq));
1353 	if (udata) {
1354 		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1355 			return ERR_PTR(-EFAULT);
1356 	}
1357 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1358 	if (!qp) {
1359 		status = -ENOMEM;
1360 		goto gen_err;
1361 	}
1362 	ocrdma_set_qp_init_params(qp, pd, attrs);
1363 	if (udata == NULL)
1364 		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
1365 					OCRDMA_QP_FAST_REG);
1366 
1367 	mutex_lock(&dev->dev_lock);
1368 	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
1369 					ureq.dpp_cq_id,
1370 					&dpp_offset, &dpp_credit_lmt);
1371 	if (status)
1372 		goto mbx_err;
1373 
	/* user-space QPs' wr_id tables are managed in the library */
1375 	if (udata == NULL) {
1376 		status = ocrdma_alloc_wr_id_tbl(qp);
1377 		if (status)
1378 			goto map_err;
1379 	}
1380 
1381 	status = ocrdma_add_qpn_map(dev, qp);
1382 	if (status)
1383 		goto map_err;
1384 	ocrdma_set_qp_db(dev, qp, pd);
1385 	if (udata) {
1386 		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
1387 					      dpp_credit_lmt,
1388 					      (attrs->srq != NULL));
1389 		if (status)
1390 			goto cpy_err;
1391 	}
1392 	ocrdma_store_gsi_qp_cq(dev, attrs);
1393 	qp->ibqp.qp_num = qp->id;
1394 	mutex_unlock(&dev->dev_lock);
1395 	return &qp->ibqp;
1396 
1397 cpy_err:
1398 	ocrdma_del_qpn_map(dev, qp);
1399 map_err:
1400 	ocrdma_mbx_destroy_qp(dev, qp);
1401 mbx_err:
1402 	mutex_unlock(&dev->dev_lock);
1403 	kfree(qp->wqe_wr_id_tbl);
1404 	kfree(qp->rqe_wr_id_tbl);
1405 	kfree(qp);
1406 	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
1407 gen_err:
1408 	return ERR_PTR(status);
1409 }
1410 
1411 int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1412 		      int attr_mask)
1413 {
1414 	int status = 0;
1415 	struct ocrdma_qp *qp;
1416 	struct ocrdma_dev *dev;
1417 	enum ib_qp_state old_qps;
1418 
1419 	qp = get_ocrdma_qp(ibqp);
1420 	dev = get_ocrdma_dev(ibqp->device);
1421 	if (attr_mask & IB_QP_STATE)
1422 		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
	/* if the new and previous states are the same, hw doesn't need to
	 * know about it.
	 */
1426 	if (status < 0)
1427 		return status;
1428 	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
1429 
1430 	return status;
1431 }
1432 
1433 int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1434 		     int attr_mask, struct ib_udata *udata)
1435 {
1436 	unsigned long flags;
1437 	int status = -EINVAL;
1438 	struct ocrdma_qp *qp;
1439 	struct ocrdma_dev *dev;
1440 	enum ib_qp_state old_qps, new_qps;
1441 
1442 	qp = get_ocrdma_qp(ibqp);
1443 	dev = get_ocrdma_dev(ibqp->device);
1444 
	/* synchronize with multiple contexts trying to change/retrieve qps */
1446 	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
1448 	spin_lock_irqsave(&qp->q_lock, flags);
1449 	old_qps = get_ibqp_state(qp->state);
1450 	if (attr_mask & IB_QP_STATE)
1451 		new_qps = attr->qp_state;
1452 	else
1453 		new_qps = old_qps;
1454 	spin_unlock_irqrestore(&qp->q_lock, flags);
1455 
1456 	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
1457 				IB_LINK_LAYER_ETHERNET)) {
1458 		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
1459 		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1460 		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
1461 		       old_qps, new_qps);
1462 		goto param_err;
1463 	}
1464 
1465 	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
1466 	if (status > 0)
1467 		status = 0;
1468 param_err:
1469 	mutex_unlock(&dev->dev_lock);
1470 	return status;
1471 }
1472 
1473 static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1474 {
1475 	switch (mtu) {
1476 	case 256:
1477 		return IB_MTU_256;
1478 	case 512:
1479 		return IB_MTU_512;
1480 	case 1024:
1481 		return IB_MTU_1024;
1482 	case 2048:
1483 		return IB_MTU_2048;
1484 	case 4096:
1485 		return IB_MTU_4096;
1486 	default:
1487 		return IB_MTU_1024;
1488 	}
1489 }
1490 
1491 static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1492 {
1493 	int ib_qp_acc_flags = 0;
1494 
1495 	if (qp_cap_flags & OCRDMA_QP_INB_WR)
1496 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1497 	if (qp_cap_flags & OCRDMA_QP_INB_RD)
1498 		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1499 	return ib_qp_acc_flags;
1500 }
1501 
1502 int ocrdma_query_qp(struct ib_qp *ibqp,
1503 		    struct ib_qp_attr *qp_attr,
1504 		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1505 {
1506 	int status;
1507 	u32 qp_state;
1508 	struct ocrdma_qp_params params;
1509 	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1510 	struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
1511 
1512 	memset(&params, 0, sizeof(params));
1513 	mutex_lock(&dev->dev_lock);
1514 	status = ocrdma_mbx_query_qp(dev, qp, &params);
1515 	mutex_unlock(&dev->dev_lock);
1516 	if (status)
1517 		goto mbx_err;
1518 	if (qp->qp_type == IB_QPT_UD)
1519 		qp_attr->qkey = params.qkey;
1520 	qp_attr->path_mtu =
1521 		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1522 				OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1523 				OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
1524 	qp_attr->path_mig_state = IB_MIG_MIGRATED;
1525 	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1526 	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1527 	qp_attr->dest_qp_num =
1528 	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1529 
1530 	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1531 	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1532 	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1533 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
1534 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1535 	qp_attr->cap.max_inline_data = qp->max_inline_data;
1536 	qp_init_attr->cap = qp_attr->cap;
1537 	memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
1538 	       sizeof(params.dgid));
1539 	qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
1540 	    OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
1541 	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
1542 	qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
1543 					  OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1544 						OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
1545 	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
1546 					      OCRDMA_QP_PARAMS_TCLASS_MASK) >>
1547 						OCRDMA_QP_PARAMS_TCLASS_SHIFT;
1548 
1549 	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1550 	qp_attr->ah_attr.port_num = 1;
1551 	qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
1552 			       OCRDMA_QP_PARAMS_SL_MASK) >>
1553 				OCRDMA_QP_PARAMS_SL_SHIFT;
1554 	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1555 			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1556 				OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1557 	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1558 			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1559 				OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1560 	qp_attr->retry_cnt =
1561 	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1562 		OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1563 	qp_attr->min_rnr_timer = 0;
1564 	qp_attr->pkey_index = 0;
1565 	qp_attr->port_num = 1;
1566 	qp_attr->ah_attr.src_path_bits = 0;
1567 	qp_attr->ah_attr.static_rate = 0;
1568 	qp_attr->alt_pkey_index = 0;
1569 	qp_attr->alt_port_num = 0;
1570 	qp_attr->alt_timeout = 0;
1571 	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1572 	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1573 		    OCRDMA_QP_PARAMS_STATE_SHIFT;
1574 	qp_attr->qp_state = get_ibqp_state(qp_state);
1575 	qp_attr->cur_qp_state = qp_attr->qp_state;
1576 	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1577 	qp_attr->max_dest_rd_atomic =
1578 	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1579 	qp_attr->max_rd_atomic =
1580 	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1581 	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1582 				OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1583 	/* Sync driver QP state with FW */
1584 	ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
1585 mbx_err:
1586 	return status;
1587 }
1588 
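/* Helpers for the circular hardware work queues: head is the producer
 * index, tail the consumer index and max_wqe_idx the wrap mask. The SRQ
 * index bitmap (initialized to all ones) appears to track which RQE
 * slots are free; ocrdma_srq_toggle_bit() flips a slot's state.
 */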
1589 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
1590 {
1591 	unsigned int i = idx / 32;
1592 	u32 mask = (1U << (idx % 32));
1593 
1594 	srq->idx_bit_fields[i] ^= mask;
1595 }
1596 
1597 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1598 {
1599 	return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
1600 }
1601 
1602 static int is_hw_sq_empty(struct ocrdma_qp *qp)
1603 {
1604 	return (qp->sq.tail == qp->sq.head);
1605 }
1606 
1607 static int is_hw_rq_empty(struct ocrdma_qp *qp)
1608 {
1609 	return (qp->rq.tail == qp->rq.head);
1610 }
1611 
1612 static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1613 {
1614 	return q->va + (q->head * q->entry_size);
1615 }
1616 
1617 static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1618 				      u32 idx)
1619 {
1620 	return q->va + (idx * q->entry_size);
1621 }
1622 
1623 static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1624 {
1625 	q->head = (q->head + 1) & q->max_wqe_idx;
1626 }
1627 
1628 static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1629 {
1630 	q->tail = (q->tail + 1) & q->max_wqe_idx;
1631 }
1632 
1633 /* discard the cqe for a given QP */
1634 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1635 {
1636 	unsigned long cq_flags;
1637 	unsigned long flags;
1638 	int discard_cnt = 0;
1639 	u32 cur_getp, stop_getp;
1640 	struct ocrdma_cqe *cqe;
1641 	u32 qpn = 0, wqe_idx = 0;
1642 
1643 	spin_lock_irqsave(&cq->cq_lock, cq_flags);
1644 
1645 	/* traverse through the CQEs in the hw CQ,
1646 	 * find the matching CQE for a given qp,
1647 	 * mark the matching one discarded by clearing qpn.
1648 	 * ring the doorbell in the poll_cq() as
1649 	 * we don't complete out of order cqe.
1650 	 */
1651 
1652 	cur_getp = cq->getp;
	/* find up to where we reap the cq. */
1654 	stop_getp = cur_getp;
1655 	do {
1656 		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1657 			break;
1658 
1659 		cqe = cq->va + cur_getp;
1660 		/* if (a) done reaping whole hw cq, or
1661 		 *    (b) qp_xq becomes empty.
1662 		 * then exit
1663 		 */
1664 		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1665 		/* if previously discarded cqe found, skip that too. */
1666 		/* check for matching qp */
1667 		if (qpn == 0 || qpn != qp->id)
1668 			goto skip_cqe;
1669 
1670 		if (is_cqe_for_sq(cqe)) {
1671 			ocrdma_hwq_inc_tail(&qp->sq);
1672 		} else {
1673 			if (qp->srq) {
1674 				wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1675 					OCRDMA_CQE_BUFTAG_SHIFT) &
1676 					qp->srq->rq.max_wqe_idx;
1677 				if (wqe_idx < 1)
1678 					BUG();
1679 				spin_lock_irqsave(&qp->srq->q_lock, flags);
1680 				ocrdma_hwq_inc_tail(&qp->srq->rq);
1681 				ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
1682 				spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1683 
1684 			} else {
1685 				ocrdma_hwq_inc_tail(&qp->rq);
1686 			}
1687 		}
1688 		/* mark cqe discarded so that it is not picked up later
1689 		 * in the poll_cq().
1690 		 */
1691 		discard_cnt += 1;
1692 		cqe->cmn.qpn = 0;
1693 skip_cqe:
1694 		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1695 	} while (cur_getp != stop_getp);
1696 	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1697 }
1698 
1699 void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1700 {
1701 	int found = false;
1702 	unsigned long flags;
1703 	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1704 	/* sync with any active CQ poll */
1705 
1706 	spin_lock_irqsave(&dev->flush_q_lock, flags);
1707 	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1708 	if (found)
1709 		list_del(&qp->sq_entry);
1710 	if (!qp->srq) {
1711 		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1712 		if (found)
1713 			list_del(&qp->rq_entry);
1714 	}
1715 	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1716 }
1717 
1718 int ocrdma_destroy_qp(struct ib_qp *ibqp)
1719 {
1720 	struct ocrdma_pd *pd;
1721 	struct ocrdma_qp *qp;
1722 	struct ocrdma_dev *dev;
1723 	struct ib_qp_attr attrs;
1724 	int attr_mask;
1725 	unsigned long flags;
1726 
1727 	qp = get_ocrdma_qp(ibqp);
1728 	dev = get_ocrdma_dev(ibqp->device);
1729 
1730 	pd = qp->pd;
1731 
1732 	/* change the QP state to ERROR */
1733 	if (qp->state != OCRDMA_QPS_RST) {
1734 		attrs.qp_state = IB_QPS_ERR;
1735 		attr_mask = IB_QP_STATE;
1736 		_ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1737 	}
	/* ensure that CQEs for a newly created QP (whose id may be the same
	 * as the one just being destroyed) don't get discarded until the
	 * old QP's CQEs are discarded.
	 */
1742 	mutex_lock(&dev->dev_lock);
1743 	(void) ocrdma_mbx_destroy_qp(dev, qp);
1744 
1745 	/*
1746 	 * acquire CQ lock while destroy is in progress, in order to
	 * protect against processing in-flight CQEs for this QP.
1748 	 */
1749 	spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
1750 	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1751 		spin_lock(&qp->rq_cq->cq_lock);
1752 
1753 	ocrdma_del_qpn_map(dev, qp);
1754 
1755 	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1756 		spin_unlock(&qp->rq_cq->cq_lock);
1757 	spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1758 
1759 	if (!pd->uctx) {
1760 		ocrdma_discard_cqes(qp, qp->sq_cq);
1761 		ocrdma_discard_cqes(qp, qp->rq_cq);
1762 	}
1763 	mutex_unlock(&dev->dev_lock);
1764 
1765 	if (pd->uctx) {
1766 		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1767 				PAGE_ALIGN(qp->sq.len));
1768 		if (!qp->srq)
1769 			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1770 					PAGE_ALIGN(qp->rq.len));
1771 	}
1772 
1773 	ocrdma_del_flush_qp(qp);
1774 
1775 	kfree(qp->wqe_wr_id_tbl);
1776 	kfree(qp->rqe_wr_id_tbl);
1777 	kfree(qp);
1778 	return 0;
1779 }
1780 
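/* Return the SRQ queue and doorbell details to user space; the doorbell
 * shift differs between SkyHawk-R (24) and the other ASIC generations (16).
 */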
1781 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1782 				struct ib_udata *udata)
1783 {
1784 	int status;
1785 	struct ocrdma_create_srq_uresp uresp;
1786 
1787 	memset(&uresp, 0, sizeof(uresp));
1788 	uresp.rq_dbid = srq->rq.dbid;
1789 	uresp.num_rq_pages = 1;
1790 	uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
1791 	uresp.rq_page_size = srq->rq.len;
1792 	uresp.db_page_addr = dev->nic_info.unmapped_db +
1793 	    (srq->pd->id * dev->nic_info.db_page_size);
1794 	uresp.db_page_size = dev->nic_info.db_page_size;
1795 	uresp.num_rqe_allocated = srq->rq.max_cnt;
1796 	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1797 		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1798 		uresp.db_shift = 24;
1799 	} else {
1800 		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1801 		uresp.db_shift = 16;
1802 	}
1803 
1804 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1805 	if (status)
1806 		return status;
1807 	status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1808 				 uresp.rq_page_size);
1809 	if (status)
1810 		return status;
1811 	return status;
1812 }
1813 
1814 struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1815 				 struct ib_srq_init_attr *init_attr,
1816 				 struct ib_udata *udata)
1817 {
1818 	int status = -ENOMEM;
1819 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1820 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1821 	struct ocrdma_srq *srq;
1822 
1823 	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1824 		return ERR_PTR(-EINVAL);
1825 	if (init_attr->attr.max_wr > dev->attr.max_rqe)
1826 		return ERR_PTR(-EINVAL);
1827 
1828 	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1829 	if (!srq)
1830 		return ERR_PTR(status);
1831 
1832 	spin_lock_init(&srq->q_lock);
1833 	srq->pd = pd;
1834 	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
1835 	status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1836 	if (status)
1837 		goto err;
1838 
1839 	if (udata == NULL) {
1840 		srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
1841 			    GFP_KERNEL);
		if (srq->rqe_wr_id_tbl == NULL) {
			status = -ENOMEM;
			goto arm_err;
		}
1844 
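		/*
		 * idx_bit_fields is a free-slot bitmap for rqe_wr_id_tbl:
		 * one bit per RQE, initialized to all ones (all free), with
		 * each 32-bit word covering 32 RQEs (hence the rounding).
		 */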
1845 		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1846 		    (srq->rq.max_cnt % 32 ? 1 : 0);
1847 		srq->idx_bit_fields =
1848 		    kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
		if (srq->idx_bit_fields == NULL) {
			status = -ENOMEM;
			goto arm_err;
		}
1851 		memset(srq->idx_bit_fields, 0xff,
1852 		       srq->bit_fields_len * sizeof(u32));
1853 	}
1854 
1855 	if (init_attr->attr.srq_limit) {
1856 		status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1857 		if (status)
1858 			goto arm_err;
1859 	}
1860 
1861 	if (udata) {
1862 		status = ocrdma_copy_srq_uresp(dev, srq, udata);
1863 		if (status)
1864 			goto arm_err;
1865 	}
1866 
1867 	return &srq->ibsrq;
1868 
1869 arm_err:
1870 	ocrdma_mbx_destroy_srq(dev, srq);
1871 err:
1872 	kfree(srq->rqe_wr_id_tbl);
1873 	kfree(srq->idx_bit_fields);
1874 	kfree(srq);
1875 	return ERR_PTR(status);
1876 }
1877 
1878 int ocrdma_modify_srq(struct ib_srq *ibsrq,
1879 		      struct ib_srq_attr *srq_attr,
1880 		      enum ib_srq_attr_mask srq_attr_mask,
1881 		      struct ib_udata *udata)
1882 {
1883 	int status = 0;
1884 	struct ocrdma_srq *srq;
1885 
1886 	srq = get_ocrdma_srq(ibsrq);
1887 	if (srq_attr_mask & IB_SRQ_MAX_WR)
1888 		status = -EINVAL;
1889 	else
1890 		status = ocrdma_mbx_modify_srq(srq, srq_attr);
1891 	return status;
1892 }
1893 
1894 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1895 {
1896 	int status;
1897 	struct ocrdma_srq *srq;
1898 
1899 	srq = get_ocrdma_srq(ibsrq);
1900 	status = ocrdma_mbx_query_srq(srq, srq_attr);
1901 	return status;
1902 }
1903 
1904 int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1905 {
1906 	int status;
1907 	struct ocrdma_srq *srq;
1908 	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1909 
1910 	srq = get_ocrdma_srq(ibsrq);
1911 
1912 	status = ocrdma_mbx_destroy_srq(dev, srq);
1913 
1914 	if (srq->pd->uctx)
1915 		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1916 				PAGE_ALIGN(srq->rq.len));
1917 
1918 	kfree(srq->idx_bit_fields);
1919 	kfree(srq->rqe_wr_id_tbl);
1920 	kfree(srq);
1921 	return status;
1922 }
1923 
1924 /* unprivileged verbs and their support functions. */
1925 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1926 				struct ocrdma_hdr_wqe *hdr,
1927 				struct ib_send_wr *wr)
1928 {
1929 	struct ocrdma_ewqe_ud_hdr *ud_hdr =
1930 		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1931 	struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
1932 
1933 	ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
1934 	if (qp->qp_type == IB_QPT_GSI)
1935 		ud_hdr->qkey = qp->qkey;
1936 	else
1937 		ud_hdr->qkey = wr->wr.ud.remote_qkey;
1938 	ud_hdr->rsvd_ahid = ah->id;
1939 	if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
1940 		hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
1941 }
1942 
1943 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1944 			      struct ocrdma_sge *sge, int num_sge,
1945 			      struct ib_sge *sg_list)
1946 {
1947 	int i;
1948 
1949 	for (i = 0; i < num_sge; i++) {
1950 		sge[i].lrkey = sg_list[i].lkey;
1951 		sge[i].addr_lo = sg_list[i].addr;
1952 		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1953 		sge[i].len = sg_list[i].length;
1954 		hdr->total_len += sg_list[i].length;
1955 	}
1956 	if (num_sge == 0)
1957 		memset(sge, 0, sizeof(*sge));
1958 }
1959 
1960 static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
1961 {
1962 	uint32_t total_len = 0, i;
1963 
1964 	for (i = 0; i < num_sge; i++)
1965 		total_len += sg_list[i].length;
1966 	return total_len;
1967 }
1968 
1969 
1970 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1971 				    struct ocrdma_hdr_wqe *hdr,
1972 				    struct ocrdma_sge *sge,
1973 				    struct ib_send_wr *wr, u32 wqe_size)
1974 {
1975 	int i;
1976 	char *dpp_addr;
1977 
1978 	if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
1979 		hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
1980 		if (unlikely(hdr->total_len > qp->max_inline_data)) {
1981 			pr_err("%s() supported_len=0x%x,\n"
1982 			       " unsupported len req=0x%x\n", __func__,
1983 				qp->max_inline_data, hdr->total_len);
1984 			return -EINVAL;
1985 		}
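		/* copy each SGE's payload inline, immediately after the WQE
		 * header, instead of posting scatter/gather entries.
		 */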
1986 		dpp_addr = (char *)sge;
1987 		for (i = 0; i < wr->num_sge; i++) {
1988 			memcpy(dpp_addr,
1989 			       (void *)(unsigned long)wr->sg_list[i].addr,
1990 			       wr->sg_list[i].length);
1991 			dpp_addr += wr->sg_list[i].length;
1992 		}
1993 
1994 		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
		if (hdr->total_len == 0)
1996 			wqe_size += sizeof(struct ocrdma_sge);
1997 		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
1998 	} else {
1999 		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2000 		if (wr->num_sge)
2001 			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
2002 		else
2003 			wqe_size += sizeof(struct ocrdma_sge);
2004 		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2005 	}
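	/* the WQE size field in cw is expressed in OCRDMA_WQE_STRIDE units */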
2006 	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2007 	return 0;
2008 }
2009 
2010 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2011 			     struct ib_send_wr *wr)
2012 {
2013 	int status;
2014 	struct ocrdma_sge *sge;
2015 	u32 wqe_size = sizeof(*hdr);
2016 
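	/* for UD QPs an extended UD header occupies the slot right after the
	 * WQE header, so the data SGEs start one header slot further in.
	 */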
2017 	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2018 		ocrdma_build_ud_hdr(qp, hdr, wr);
2019 		sge = (struct ocrdma_sge *)(hdr + 2);
2020 		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
2021 	} else {
2022 		sge = (struct ocrdma_sge *)(hdr + 1);
2023 	}
2024 
2025 	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2026 	return status;
2027 }
2028 
2029 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2030 			      struct ib_send_wr *wr)
2031 {
2032 	int status;
2033 	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2034 	struct ocrdma_sge *sge = ext_rw + 1;
2035 	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
2036 
2037 	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2038 	if (status)
2039 		return status;
2040 	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
2041 	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
2042 	ext_rw->lrkey = wr->wr.rdma.rkey;
2043 	ext_rw->len = hdr->total_len;
2044 	return 0;
2045 }
2046 
2047 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2048 			      struct ib_send_wr *wr)
2049 {
2050 	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2051 	struct ocrdma_sge *sge = ext_rw + 1;
2052 	u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2053 	    sizeof(struct ocrdma_hdr_wqe);
2054 
2055 	ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2056 	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2057 	hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2058 	hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2059 
2060 	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
2061 	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
2062 	ext_rw->lrkey = wr->wr.rdma.rkey;
2063 	ext_rw->len = hdr->total_len;
2064 }
2065 
2066 static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
2067 			    struct ocrdma_hw_mr *hwmr)
2068 {
2069 	int i;
2070 	u64 buf_addr = 0;
2071 	int num_pbes;
2072 	struct ocrdma_pbe *pbe;
2073 
2074 	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2075 	num_pbes = 0;
2076 
2077 	/* go through the OS phy regions & fill hw pbe entries into pbls. */
2078 	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		/* one OS buffer may need more than one pbe when the
		 * buffers are of different sizes; split the ib_buf
		 * into one or more pbes.
		 */
2083 		buf_addr = wr->wr.fast_reg.page_list->page_list[i];
2084 		pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2085 		pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
2086 		num_pbes += 1;
2087 		pbe++;
2088 
		/* if the pbl is full of pbes, move to the next pbl. */
2092 		if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
2093 			pbl_tbl++;
2094 			pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2095 		}
2096 	}
2097 	return;
2098 }
2099 
2100 static int get_encoded_page_size(int pg_sz)
2101 {
2102 	/* Max size is 256M 4096 << 16 */
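	/* e.g. pg_sz = 4096 encodes to 0, 8192 to 1, 65536 (4096 << 4) to 4,
	 * and 256M (4096 << 16) to 16.
	 */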
2103 	int i = 0;
2104 	for (; i < 17; i++)
2105 		if (pg_sz == (4096 << i))
2106 			break;
2107 	return i;
2108 }
2109 
2110 
2111 static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2112 			   struct ib_send_wr *wr)
2113 {
2114 	u64 fbo;
2115 	struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2116 	struct ocrdma_mr *mr;
2117 	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2118 	u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2119 
2120 	wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2121 
2122 	if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
2123 		return -EINVAL;
2124 
2125 	hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2126 	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2127 
	BUG_ON(wr->wr.fast_reg.page_list_len == 0);
2130 	if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
2131 		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2132 	if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
2133 		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2134 	if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
2135 		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2136 	hdr->lkey = wr->wr.fast_reg.rkey;
2137 	hdr->total_len = wr->wr.fast_reg.length;
2138 
2139 	fbo = wr->wr.fast_reg.iova_start -
2140 	    (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
2141 
2142 	fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
2143 	fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
2144 	fast_reg->fbo_hi = upper_32_bits(fbo);
2145 	fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2146 	fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
2147 	fast_reg->size_sge =
2148 		get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
2149 	mr = (struct ocrdma_mr *) (unsigned long)
2150 		dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
2151 	build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
2152 	return 0;
2153 }
2154 
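/* Ring the SQ doorbell: the value carries the SQ's doorbell queue id in the
 * low bits and a count of one newly posted WQE in the field selected by
 * OCRDMA_DB_SQ_SHIFT.
 */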
2155 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2156 {
2157 	u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
2158 
2159 	iowrite32(val, qp->sq_db);
2160 }
2161 
2162 int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2163 		     struct ib_send_wr **bad_wr)
2164 {
2165 	int status = 0;
2166 	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2167 	struct ocrdma_hdr_wqe *hdr;
2168 	unsigned long flags;
2169 
2170 	spin_lock_irqsave(&qp->q_lock, flags);
2171 	if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2172 		spin_unlock_irqrestore(&qp->q_lock, flags);
2173 		*bad_wr = wr;
2174 		return -EINVAL;
2175 	}
2176 
2177 	while (wr) {
2178 		if (qp->qp_type == IB_QPT_UD &&
2179 		    (wr->opcode != IB_WR_SEND &&
2180 		     wr->opcode != IB_WR_SEND_WITH_IMM)) {
2181 			*bad_wr = wr;
2182 			status = -EINVAL;
2183 			break;
2184 		}
2185 		if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2186 		    wr->num_sge > qp->sq.max_sges) {
2187 			*bad_wr = wr;
2188 			status = -ENOMEM;
2189 			break;
2190 		}
2191 		hdr = ocrdma_hwq_head(&qp->sq);
2192 		hdr->cw = 0;
2193 		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2194 			hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2195 		if (wr->send_flags & IB_SEND_FENCE)
2196 			hdr->cw |=
2197 			    (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2198 		if (wr->send_flags & IB_SEND_SOLICITED)
2199 			hdr->cw |=
2200 			    (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2201 		hdr->total_len = 0;
2202 		switch (wr->opcode) {
2203 		case IB_WR_SEND_WITH_IMM:
2204 			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2205 			hdr->immdt = ntohl(wr->ex.imm_data);
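			/* fall through */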
2206 		case IB_WR_SEND:
2207 			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_send(qp, hdr, wr);
2209 			break;
2210 		case IB_WR_SEND_WITH_INV:
2211 			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2212 			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2213 			hdr->lkey = wr->ex.invalidate_rkey;
2214 			status = ocrdma_build_send(qp, hdr, wr);
2215 			break;
2216 		case IB_WR_RDMA_WRITE_WITH_IMM:
2217 			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2218 			hdr->immdt = ntohl(wr->ex.imm_data);
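			/* fall through */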
2219 		case IB_WR_RDMA_WRITE:
2220 			hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2221 			status = ocrdma_build_write(qp, hdr, wr);
2222 			break;
2223 		case IB_WR_RDMA_READ:
2224 			ocrdma_build_read(qp, hdr, wr);
2225 			break;
2226 		case IB_WR_LOCAL_INV:
2227 			hdr->cw |=
2228 			    (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
2229 			hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2230 					sizeof(struct ocrdma_sge)) /
2231 				OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2232 			hdr->lkey = wr->ex.invalidate_rkey;
2233 			break;
2234 		case IB_WR_FAST_REG_MR:
2235 			status = ocrdma_build_fr(qp, hdr, wr);
2236 			break;
2237 		default:
2238 			status = -EINVAL;
2239 			break;
2240 		}
2241 		if (status) {
2242 			*bad_wr = wr;
2243 			break;
2244 		}
2245 		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2246 			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2247 		else
2248 			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2249 		qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2250 		ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2251 				   OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2252 		/* make sure wqe is written before adapter can access it */
2253 		wmb();
2254 		/* inform hw to start processing it */
2255 		ocrdma_ring_sq_db(qp);
2256 
2257 		/* update pointer, counter for next wr */
2258 		ocrdma_hwq_inc_head(&qp->sq);
2259 		wr = wr->next;
2260 	}
2261 	spin_unlock_irqrestore(&qp->q_lock, flags);
2262 	return status;
2263 }
2264 
2265 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2266 {
2267 	u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
2268 
2269 	iowrite32(val, qp->rq_db);
2270 }
2271 
2272 static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
2273 			     u16 tag)
2274 {
2275 	u32 wqe_size = 0;
2276 	struct ocrdma_sge *sge;
2277 	if (wr->num_sge)
2278 		wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2279 	else
2280 		wqe_size = sizeof(*sge) + sizeof(*rqe);
2281 
2282 	rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2283 				OCRDMA_WQE_SIZE_SHIFT);
2284 	rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2285 	rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2286 	rqe->total_len = 0;
2287 	rqe->rsvd_tag = tag;
2288 	sge = (struct ocrdma_sge *)(rqe + 1);
2289 	ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2290 	ocrdma_cpu_to_le32(rqe, wqe_size);
2291 }
2292 
2293 int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2294 		     struct ib_recv_wr **bad_wr)
2295 {
2296 	int status = 0;
2297 	unsigned long flags;
2298 	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2299 	struct ocrdma_hdr_wqe *rqe;
2300 
2301 	spin_lock_irqsave(&qp->q_lock, flags);
2302 	if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2303 		spin_unlock_irqrestore(&qp->q_lock, flags);
2304 		*bad_wr = wr;
2305 		return -EINVAL;
2306 	}
2307 	while (wr) {
2308 		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2309 		    wr->num_sge > qp->rq.max_sges) {
2310 			*bad_wr = wr;
2311 			status = -ENOMEM;
2312 			break;
2313 		}
2314 		rqe = ocrdma_hwq_head(&qp->rq);
2315 		ocrdma_build_rqe(rqe, wr, 0);
2316 
2317 		qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2318 		/* make sure rqe is written before adapter can access it */
2319 		wmb();
2320 
2321 		/* inform hw to start processing it */
2322 		ocrdma_ring_rq_db(qp);
2323 
2324 		/* update pointer, counter for next wr */
2325 		ocrdma_hwq_inc_head(&qp->rq);
2326 		wr = wr->next;
2327 	}
2328 	spin_unlock_irqrestore(&qp->q_lock, flags);
2329 	return status;
2330 }
2331 
/* CQEs for an SRQ's RQEs can potentially arrive out of order.
 * The index gives the entry in the shadow table where the wr_id is
 * stored; the tag/index is returned in the cqe so that a given rqe
 * can be referenced back.
 */
2337 static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2338 {
2339 	int row = 0;
2340 	int indx = 0;
2341 
2342 	for (row = 0; row < srq->bit_fields_len; row++) {
2343 		if (srq->idx_bit_fields[row]) {
2344 			indx = ffs(srq->idx_bit_fields[row]);
2345 			indx = (row * 32) + (indx - 1);
			BUG_ON(indx >= srq->rq.max_cnt);
2348 			ocrdma_srq_toggle_bit(srq, indx);
2349 			break;
2350 		}
2351 	}
2352 
	BUG_ON(row == srq->bit_fields_len);
2355 	return indx + 1; /* Use from index 1 */
2356 }
2357 
2358 static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2359 {
2360 	u32 val = srq->rq.dbid | (1 << 16);
2361 
2362 	iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2363 }
2364 
2365 int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
2366 			 struct ib_recv_wr **bad_wr)
2367 {
2368 	int status = 0;
2369 	unsigned long flags;
2370 	struct ocrdma_srq *srq;
2371 	struct ocrdma_hdr_wqe *rqe;
2372 	u16 tag;
2373 
2374 	srq = get_ocrdma_srq(ibsrq);
2375 
2376 	spin_lock_irqsave(&srq->q_lock, flags);
2377 	while (wr) {
2378 		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2379 		    wr->num_sge > srq->rq.max_sges) {
2380 			status = -ENOMEM;
2381 			*bad_wr = wr;
2382 			break;
2383 		}
2384 		tag = ocrdma_srq_get_idx(srq);
2385 		rqe = ocrdma_hwq_head(&srq->rq);
2386 		ocrdma_build_rqe(rqe, wr, tag);
2387 
2388 		srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2389 		/* make sure rqe is written before adapter can perform DMA */
2390 		wmb();
2391 		/* inform hw to start processing it */
2392 		ocrdma_ring_srq_db(srq);
2393 		/* update pointer, counter for next wr */
2394 		ocrdma_hwq_inc_head(&srq->rq);
2395 		wr = wr->next;
2396 	}
2397 	spin_unlock_irqrestore(&srq->q_lock, flags);
2398 	return status;
2399 }
2400 
2401 static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2402 {
2403 	enum ib_wc_status ibwc_status;
2404 
2405 	switch (status) {
2406 	case OCRDMA_CQE_GENERAL_ERR:
2407 		ibwc_status = IB_WC_GENERAL_ERR;
2408 		break;
2409 	case OCRDMA_CQE_LOC_LEN_ERR:
2410 		ibwc_status = IB_WC_LOC_LEN_ERR;
2411 		break;
2412 	case OCRDMA_CQE_LOC_QP_OP_ERR:
2413 		ibwc_status = IB_WC_LOC_QP_OP_ERR;
2414 		break;
2415 	case OCRDMA_CQE_LOC_EEC_OP_ERR:
2416 		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2417 		break;
2418 	case OCRDMA_CQE_LOC_PROT_ERR:
2419 		ibwc_status = IB_WC_LOC_PROT_ERR;
2420 		break;
2421 	case OCRDMA_CQE_WR_FLUSH_ERR:
2422 		ibwc_status = IB_WC_WR_FLUSH_ERR;
2423 		break;
2424 	case OCRDMA_CQE_MW_BIND_ERR:
2425 		ibwc_status = IB_WC_MW_BIND_ERR;
2426 		break;
2427 	case OCRDMA_CQE_BAD_RESP_ERR:
2428 		ibwc_status = IB_WC_BAD_RESP_ERR;
2429 		break;
2430 	case OCRDMA_CQE_LOC_ACCESS_ERR:
2431 		ibwc_status = IB_WC_LOC_ACCESS_ERR;
2432 		break;
2433 	case OCRDMA_CQE_REM_INV_REQ_ERR:
2434 		ibwc_status = IB_WC_REM_INV_REQ_ERR;
2435 		break;
2436 	case OCRDMA_CQE_REM_ACCESS_ERR:
2437 		ibwc_status = IB_WC_REM_ACCESS_ERR;
2438 		break;
2439 	case OCRDMA_CQE_REM_OP_ERR:
2440 		ibwc_status = IB_WC_REM_OP_ERR;
2441 		break;
2442 	case OCRDMA_CQE_RETRY_EXC_ERR:
2443 		ibwc_status = IB_WC_RETRY_EXC_ERR;
2444 		break;
2445 	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2446 		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2447 		break;
2448 	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2449 		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2450 		break;
2451 	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2452 		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2453 		break;
2454 	case OCRDMA_CQE_REM_ABORT_ERR:
2455 		ibwc_status = IB_WC_REM_ABORT_ERR;
2456 		break;
2457 	case OCRDMA_CQE_INV_EECN_ERR:
2458 		ibwc_status = IB_WC_INV_EECN_ERR;
2459 		break;
2460 	case OCRDMA_CQE_INV_EEC_STATE_ERR:
2461 		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2462 		break;
2463 	case OCRDMA_CQE_FATAL_ERR:
2464 		ibwc_status = IB_WC_FATAL_ERR;
2465 		break;
2466 	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2467 		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2468 		break;
2469 	default:
2470 		ibwc_status = IB_WC_GENERAL_ERR;
2471 		break;
2472 	}
2473 	return ibwc_status;
2474 }
2475 
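/* Translate the send WQE at wqe_idx into the corresponding ib_wc fields
 * (wr_id, opcode and, for RDMA READ, the byte length).
 */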
2476 static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2477 		      u32 wqe_idx)
2478 {
2479 	struct ocrdma_hdr_wqe *hdr;
2480 	struct ocrdma_sge *rw;
2481 	int opcode;
2482 
2483 	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2484 
2485 	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2486 	/* Undo the hdr->cw swap */
2487 	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2488 	switch (opcode) {
2489 	case OCRDMA_WRITE:
2490 		ibwc->opcode = IB_WC_RDMA_WRITE;
2491 		break;
2492 	case OCRDMA_READ:
2493 		rw = (struct ocrdma_sge *)(hdr + 1);
2494 		ibwc->opcode = IB_WC_RDMA_READ;
2495 		ibwc->byte_len = rw->len;
2496 		break;
2497 	case OCRDMA_SEND:
2498 		ibwc->opcode = IB_WC_SEND;
2499 		break;
2500 	case OCRDMA_FR_MR:
2501 		ibwc->opcode = IB_WC_FAST_REG_MR;
2502 		break;
2503 	case OCRDMA_LKEY_INV:
2504 		ibwc->opcode = IB_WC_LOCAL_INV;
2505 		break;
2506 	default:
2507 		ibwc->status = IB_WC_GENERAL_ERR;
		pr_err("%s() invalid opcode received = 0x%x\n",
		       __func__, opcode);
2510 		break;
2511 	}
2512 }
2513 
2514 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2515 						struct ocrdma_cqe *cqe)
2516 {
2517 	if (is_cqe_for_sq(cqe)) {
2518 		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2519 				cqe->flags_status_srcqpn) &
2520 					~OCRDMA_CQE_STATUS_MASK);
2521 		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2522 				cqe->flags_status_srcqpn) |
2523 				(OCRDMA_CQE_WR_FLUSH_ERR <<
2524 					OCRDMA_CQE_STATUS_SHIFT));
2525 	} else {
2526 		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2527 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2528 					cqe->flags_status_srcqpn) &
2529 						~OCRDMA_CQE_UD_STATUS_MASK);
2530 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2531 					cqe->flags_status_srcqpn) |
2532 					(OCRDMA_CQE_WR_FLUSH_ERR <<
2533 						OCRDMA_CQE_UD_STATUS_SHIFT));
2534 		} else {
2535 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2536 					cqe->flags_status_srcqpn) &
2537 						~OCRDMA_CQE_STATUS_MASK);
2538 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2539 					cqe->flags_status_srcqpn) |
2540 					(OCRDMA_CQE_WR_FLUSH_ERR <<
2541 						OCRDMA_CQE_STATUS_SHIFT));
2542 		}
2543 	}
2544 }
2545 
2546 static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2547 				  struct ocrdma_qp *qp, int status)
2548 {
2549 	bool expand = false;
2550 
2551 	ibwc->byte_len = 0;
2552 	ibwc->qp = &qp->ibqp;
2553 	ibwc->status = ocrdma_to_ibwc_err(status);
2554 
2555 	ocrdma_flush_qp(qp);
2556 	ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
2557 
	/* if WQEs/RQEs are still pending for which CQEs need to be
	 * returned, trigger expanding them into flush completions.
	 */
2561 	if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2562 		expand = true;
2563 		ocrdma_set_cqe_status_flushed(qp, cqe);
2564 	}
2565 	return expand;
2566 }
2567 
2568 static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2569 				  struct ocrdma_qp *qp, int status)
2570 {
2571 	ibwc->opcode = IB_WC_RECV;
2572 	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2573 	ocrdma_hwq_inc_tail(&qp->rq);
2574 
2575 	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2576 }
2577 
2578 static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2579 				  struct ocrdma_qp *qp, int status)
2580 {
2581 	ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2582 	ocrdma_hwq_inc_tail(&qp->sq);
2583 
2584 	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2585 }
2586 
2587 
2588 static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2589 				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2590 				 bool *polled, bool *stop)
2591 {
2592 	bool expand;
2593 	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2594 	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2595 		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2596 	if (status < OCRDMA_MAX_CQE_ERR)
2597 		atomic_inc(&dev->cqe_err_stats[status]);
2598 
	/* when the hw SQ is empty but the RQ is not, keep the cqe
	 * so that the cq event is raised again.
	 */
2602 	if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2603 		/* when cq for rq and sq is same, it is safe to return
2604 		 * flush cqe for RQEs.
2605 		 */
2606 		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2607 			*polled = true;
2608 			status = OCRDMA_CQE_WR_FLUSH_ERR;
2609 			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2610 		} else {
2611 			/* stop processing further cqe as this cqe is used for
2612 			 * triggering cq event on buddy cq of RQ.
2613 			 * When QP is destroyed, this cqe will be removed
2614 			 * from the cq's hardware q.
2615 			 */
2616 			*polled = false;
2617 			*stop = true;
2618 			expand = false;
2619 		}
2620 	} else if (is_hw_sq_empty(qp)) {
2621 		/* Do nothing */
2622 		expand = false;
2623 		*polled = false;
2624 		*stop = false;
2625 	} else {
2626 		*polled = true;
2627 		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2628 	}
2629 	return expand;
2630 }
2631 
2632 static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2633 				     struct ocrdma_cqe *cqe,
2634 				     struct ib_wc *ibwc, bool *polled)
2635 {
2636 	bool expand = false;
2637 	int tail = qp->sq.tail;
2638 	u32 wqe_idx;
2639 
2640 	if (!qp->wqe_wr_id_tbl[tail].signaled) {
2641 		*polled = false;    /* WC cannot be consumed yet */
2642 	} else {
2643 		ibwc->status = IB_WC_SUCCESS;
2644 		ibwc->wc_flags = 0;
2645 		ibwc->qp = &qp->ibqp;
2646 		ocrdma_update_wc(qp, ibwc, tail);
2647 		*polled = true;
2648 	}
2649 	wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2650 			OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
2651 	if (tail != wqe_idx)
2652 		expand = true; /* Coalesced CQE can't be consumed yet */
2653 
2654 	ocrdma_hwq_inc_tail(&qp->sq);
2655 	return expand;
2656 }
2657 
2658 static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2659 			     struct ib_wc *ibwc, bool *polled, bool *stop)
2660 {
2661 	int status;
2662 	bool expand;
2663 
2664 	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2665 		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2666 
2667 	if (status == OCRDMA_CQE_SUCCESS)
2668 		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2669 	else
2670 		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2671 	return expand;
2672 }
2673 
2674 static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2675 {
2676 	int status;
2677 
2678 	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2679 		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2680 	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2681 						OCRDMA_CQE_SRCQP_MASK;
2682 	ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
2683 						OCRDMA_CQE_PKEY_MASK;
2684 	ibwc->wc_flags = IB_WC_GRH;
2685 	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2686 					OCRDMA_CQE_UD_XFER_LEN_SHIFT);
2687 	return status;
2688 }
2689 
2690 static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2691 				       struct ocrdma_cqe *cqe,
2692 				       struct ocrdma_qp *qp)
2693 {
2694 	unsigned long flags;
2695 	struct ocrdma_srq *srq;
2696 	u32 wqe_idx;
2697 
2698 	srq = get_ocrdma_srq(qp->ibqp.srq);
2699 	wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
2700 		OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
	BUG_ON(wqe_idx < 1);
2703 
2704 	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2705 	spin_lock_irqsave(&srq->q_lock, flags);
2706 	ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
2707 	spin_unlock_irqrestore(&srq->q_lock, flags);
2708 	ocrdma_hwq_inc_tail(&srq->rq);
2709 }
2710 
2711 static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2712 				struct ib_wc *ibwc, bool *polled, bool *stop,
2713 				int status)
2714 {
2715 	bool expand;
2716 	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2717 
2718 	if (status < OCRDMA_MAX_CQE_ERR)
2719 		atomic_inc(&dev->cqe_err_stats[status]);
2720 
	/* when the hw RQ is empty but the SQ is not, keep the cqe
	 * so that the cq event is raised again.
	 */
2724 	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2725 		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2726 			*polled = true;
2727 			status = OCRDMA_CQE_WR_FLUSH_ERR;
2728 			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2729 		} else {
2730 			*polled = false;
2731 			*stop = true;
2732 			expand = false;
2733 		}
2734 	} else if (is_hw_rq_empty(qp)) {
2735 		/* Do nothing */
2736 		expand = false;
2737 		*polled = false;
2738 		*stop = false;
2739 	} else {
2740 		*polled = true;
2741 		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2742 	}
2743 	return expand;
2744 }
2745 
2746 static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2747 				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2748 {
2749 	ibwc->opcode = IB_WC_RECV;
2750 	ibwc->qp = &qp->ibqp;
2751 	ibwc->status = IB_WC_SUCCESS;
2752 
2753 	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2754 		ocrdma_update_ud_rcqe(ibwc, cqe);
2755 	else
2756 		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2757 
2758 	if (is_cqe_imm(cqe)) {
2759 		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2760 		ibwc->wc_flags |= IB_WC_WITH_IMM;
2761 	} else if (is_cqe_wr_imm(cqe)) {
2762 		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2763 		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2764 		ibwc->wc_flags |= IB_WC_WITH_IMM;
2765 	} else if (is_cqe_invalidated(cqe)) {
2766 		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2767 		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2768 	}
2769 	if (qp->ibqp.srq) {
2770 		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
2771 	} else {
2772 		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2773 		ocrdma_hwq_inc_tail(&qp->rq);
2774 	}
2775 }
2776 
2777 static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2778 			     struct ib_wc *ibwc, bool *polled, bool *stop)
2779 {
2780 	int status;
2781 	bool expand = false;
2782 
2783 	ibwc->wc_flags = 0;
2784 	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2785 		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2786 					OCRDMA_CQE_UD_STATUS_MASK) >>
2787 					OCRDMA_CQE_UD_STATUS_SHIFT;
2788 	} else {
2789 		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2790 			     OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2791 	}
2792 
2793 	if (status == OCRDMA_CQE_SUCCESS) {
2794 		*polled = true;
2795 		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2796 	} else {
2797 		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2798 					      status);
2799 	}
2800 	return expand;
2801 }
2802 
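/* When the consumer index wraps around to 0, flip the expected valid-phase
 * bit so CQEs left over from the previous pass are not seen as new; CQs
 * without phase_change simply clear the valid bit of each consumed CQE.
 */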
2803 static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2804 				   u16 cur_getp)
2805 {
2806 	if (cq->phase_change) {
2807 		if (cur_getp == 0)
2808 			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
2809 	} else {
2810 		/* clear valid bit */
2811 		cqe->flags_status_srcqpn = 0;
2812 	}
2813 }
2814 
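/* Poll up to num_entries CQEs from the hardware CQ into ibwc[].
 * "expand" means the current hardware CQE must be revisited because it
 * maps to more than one work completion (coalesced or flush case);
 * "stop" means the CQE is retained to trigger an event on the buddy CQ
 * and polling must halt here.
 */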
2815 static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
2816 			    struct ib_wc *ibwc)
2817 {
2818 	u16 qpn = 0;
2819 	int i = 0;
2820 	bool expand = false;
2821 	int polled_hw_cqes = 0;
2822 	struct ocrdma_qp *qp = NULL;
2823 	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
2824 	struct ocrdma_cqe *cqe;
	u16 cur_getp;
	bool polled = false;
	bool stop = false;
2826 
2827 	cur_getp = cq->getp;
2828 	while (num_entries) {
2829 		cqe = cq->va + cur_getp;
2830 		/* check whether valid cqe or not */
2831 		if (!is_cqe_valid(cq, cqe))
2832 			break;
2833 		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
2834 		/* ignore discarded cqe */
2835 		if (qpn == 0)
2836 			goto skip_cqe;
2837 		qp = dev->qp_tbl[qpn];
2838 		BUG_ON(qp == NULL);
2839 
2840 		if (is_cqe_for_sq(cqe)) {
2841 			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
2842 						  &stop);
2843 		} else {
2844 			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
2845 						  &stop);
2846 		}
2847 		if (expand)
2848 			goto expand_cqe;
2849 		if (stop)
2850 			goto stop_cqe;
2851 		/* clear qpn to avoid duplicate processing by discard_cqe() */
2852 		cqe->cmn.qpn = 0;
2853 skip_cqe:
2854 		polled_hw_cqes += 1;
2855 		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
2856 		ocrdma_change_cq_phase(cq, cqe, cur_getp);
2857 expand_cqe:
2858 		if (polled) {
2859 			num_entries -= 1;
2860 			i += 1;
2861 			ibwc = ibwc + 1;
2862 			polled = false;
2863 		}
2864 	}
2865 stop_cqe:
2866 	cq->getp = cur_getp;
2867 	if (cq->deferred_arm) {
2868 		ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol,
2869 				  polled_hw_cqes);
2870 		cq->deferred_arm = false;
2871 		cq->deferred_sol = false;
2872 	} else {
2873 		/* We need to pop the CQE. No need to arm */
2874 		ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol,
2875 				  polled_hw_cqes);
2876 		cq->deferred_sol = false;
2877 	}
2878 
2879 	return i;
2880 }
2881 
2882 /* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
2883 static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
2884 			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
2885 {
2886 	int err_cqes = 0;
2887 
2888 	while (num_entries) {
2889 		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
2890 			break;
2891 		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
2892 			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2893 			ocrdma_hwq_inc_tail(&qp->sq);
2894 		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
2895 			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2896 			ocrdma_hwq_inc_tail(&qp->rq);
2897 		} else {
2898 			return err_cqes;
2899 		}
2900 		ibwc->byte_len = 0;
2901 		ibwc->status = IB_WC_WR_FLUSH_ERR;
2902 		ibwc = ibwc + 1;
2903 		err_cqes += 1;
2904 		num_entries -= 1;
2905 	}
2906 	return err_cqes;
2907 }
2908 
2909 int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2910 {
2911 	int cqes_to_poll = num_entries;
2912 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2913 	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2914 	int num_os_cqe = 0, err_cqes = 0;
2915 	struct ocrdma_qp *qp;
2916 	unsigned long flags;
2917 
2918 	/* poll cqes from adapter CQ */
2919 	spin_lock_irqsave(&cq->cq_lock, flags);
2920 	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
2921 	spin_unlock_irqrestore(&cq->cq_lock, flags);
2922 	cqes_to_poll -= num_os_cqe;
2923 
2924 	if (cqes_to_poll) {
2925 		wc = wc + num_os_cqe;
		/* the adapter returns a single error cqe when a qp moves to
		 * the error state, so insert error cqes with wc_status
		 * FLUSH_ERR for the pending WQEs and RQEs of any QP whose
		 * SQ or RQ uses this CQ.
		 */
2931 		spin_lock_irqsave(&dev->flush_q_lock, flags);
2932 		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
2933 			if (cqes_to_poll == 0)
2934 				break;
2935 			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
2936 			cqes_to_poll -= err_cqes;
2937 			num_os_cqe += err_cqes;
2938 			wc = wc + err_cqes;
2939 		}
2940 		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
2941 	}
2942 	return num_os_cqe;
2943 }
2944 
2945 int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
2946 {
2947 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2948 	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2949 	u16 cq_id;
2950 	unsigned long flags;
2951 	bool arm_needed = false, sol_needed = false;
2952 
2953 	cq_id = cq->id;
2954 
2955 	spin_lock_irqsave(&cq->cq_lock, flags);
2956 	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
2957 		arm_needed = true;
2958 	if (cq_flags & IB_CQ_SOLICITED)
2959 		sol_needed = true;
2960 
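	/* ring the doorbell immediately only for the very first arm request;
	 * later arms are deferred until ocrdma_poll_hwcq() rings the CQ
	 * doorbell with the number of CQEs consumed.
	 */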
2961 	if (cq->first_arm) {
2962 		ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
2963 		cq->first_arm = false;
2964 	}
2965 
2966 	cq->deferred_arm = true;
2967 	cq->deferred_sol = sol_needed;
2968 	spin_unlock_irqrestore(&cq->cq_lock, flags);
2969 
2970 	return 0;
2971 }
2972 
2973 struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
2974 {
2975 	int status;
2976 	struct ocrdma_mr *mr;
2977 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
2978 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
2979 
2980 	if (max_page_list_len > dev->attr.max_pages_per_frmr)
2981 		return ERR_PTR(-EINVAL);
2982 
2983 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2984 	if (!mr)
2985 		return ERR_PTR(-ENOMEM);
2986 
2987 	status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
2988 	if (status)
2989 		goto pbl_err;
2990 	mr->hwmr.fr_mr = 1;
2991 	mr->hwmr.remote_rd = 0;
2992 	mr->hwmr.remote_wr = 0;
2993 	mr->hwmr.local_rd = 0;
2994 	mr->hwmr.local_wr = 0;
2995 	mr->hwmr.mw_bind = 0;
2996 	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
2997 	if (status)
2998 		goto pbl_err;
2999 	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
3000 	if (status)
3001 		goto mbx_err;
3002 	mr->ibmr.rkey = mr->hwmr.lkey;
3003 	mr->ibmr.lkey = mr->hwmr.lkey;
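	/* store the MR in stag_arr, indexed by the upper bits of the lkey
	 * (the low 8 bits hold the key), so ocrdma_build_fr() can find it
	 * from the fast-register WR's rkey.
	 */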
3004 	dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
3005 		(unsigned long) mr;
3006 	return &mr->ibmr;
3007 mbx_err:
3008 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
3009 pbl_err:
3010 	kfree(mr);
3011 	return ERR_PTR(-ENOMEM);
3012 }
3013 
3014 struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
3015 							  *ibdev,
3016 							  int page_list_len)
3017 {
3018 	struct ib_fast_reg_page_list *frmr_list;
3019 	int size;
3020 
3021 	size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
3022 	frmr_list = kzalloc(size, GFP_KERNEL);
3023 	if (!frmr_list)
3024 		return ERR_PTR(-ENOMEM);
3025 	frmr_list->page_list = (u64 *)(frmr_list + 1);
3026 	return frmr_list;
3027 }
3028 
3029 void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
3030 {
3031 	kfree(page_list);
3032 }
3033 
3034 #define MAX_KERNEL_PBE_SIZE 65536
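/* Pick the largest power-of-two PBE size (capped at MAX_KERNEL_PBE_SIZE)
 * that describes every buffer in the list and return the total number of
 * PBEs needed; a return of 0 means the buffer layout cannot be mapped.
 */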
3035 static inline int count_kernel_pbes(struct ib_phys_buf *buf_list,
3036 				    int buf_cnt, u32 *pbe_size)
3037 {
3038 	u64 total_size = 0;
3039 	u64 buf_size = 0;
3040 	int i;
3041 	*pbe_size = roundup(buf_list[0].size, PAGE_SIZE);
3042 	*pbe_size = roundup_pow_of_two(*pbe_size);
3043 
3044 	/* find the smallest PBE size that we can have */
3045 	for (i = 0; i < buf_cnt; i++) {
		/* first addr may not be page aligned, so skip checking it */
3047 		if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) ||
3048 				 (buf_list[i].size & ~PAGE_MASK))) {
3049 			return 0;
3050 		}
3051 
		/* if the configured PBE size is greater than the chosen one,
3053 		 * reduce the PBE size.
3054 		 */
3055 		buf_size = roundup(buf_list[i].size, PAGE_SIZE);
		/* pbe_size must be 4K times a power of two (1, 2, 4, 8...) */
3057 		buf_size = roundup_pow_of_two(buf_size);
3058 		if (*pbe_size > buf_size)
3059 			*pbe_size = buf_size;
3060 
3061 		total_size += buf_size;
3062 	}
3063 	*pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ?
3064 	    (MAX_KERNEL_PBE_SIZE) : (*pbe_size);
3065 
3066 	/* num_pbes = total_size / (*pbe_size);  this is implemented below. */
3067 
3068 	return total_size >> ilog2(*pbe_size);
3069 }
3070 
3071 static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
3072 			      u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
3073 			      struct ocrdma_hw_mr *hwmr)
3074 {
3075 	int i;
3076 	int idx;
3077 	int pbes_per_buf = 0;
3078 	u64 buf_addr = 0;
3079 	int num_pbes;
3080 	struct ocrdma_pbe *pbe;
3081 	int total_num_pbes = 0;
3082 
3083 	if (!hwmr->num_pbes)
3084 		return;
3085 
3086 	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
3087 	num_pbes = 0;
3088 
3089 	/* go through the OS phy regions & fill hw pbe entries into pbls. */
3090 	for (i = 0; i < ib_buf_cnt; i++) {
3091 		buf_addr = buf_list[i].addr;
3092 		pbes_per_buf =
3093 		    roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
3094 		    pbe_size;
3095 		hwmr->len += buf_list[i].size;
		/* one OS buffer may need more than one pbe when the
		 * buffers are of different sizes; split the ib_buf
		 * into one or more pbes.
		 */
3100 		for (idx = 0; idx < pbes_per_buf; idx++) {
			/* we always program page-aligned addresses; the
			 * first unaligned address is taken care of by fbo.
			 */
3104 			if (i == 0) {
				/* for a non-zero fbo, program the
				 * start of the page.
				 */
3108 				pbe->pa_lo =
3109 				    cpu_to_le32((u32) (buf_addr & PAGE_MASK));
3110 				pbe->pa_hi =
3111 				    cpu_to_le32((u32) upper_32_bits(buf_addr));
3112 			} else {
3113 				pbe->pa_lo =
3114 				    cpu_to_le32((u32) (buf_addr & 0xffffffff));
3115 				pbe->pa_hi =
3116 				    cpu_to_le32((u32) upper_32_bits(buf_addr));
3117 			}
3118 			buf_addr += pbe_size;
3119 			num_pbes += 1;
3120 			total_num_pbes += 1;
3121 			pbe++;
3122 
3123 			if (total_num_pbes == hwmr->num_pbes)
3124 				goto mr_tbl_done;
			/* if the pbl is full of pbes, move to the next pbl. */
3128 			if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
3129 				pbl_tbl++;
3130 				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
3131 				num_pbes = 0;
3132 			}
3133 		}
3134 	}
3135 mr_tbl_done:
3136 	return;
3137 }
3138 
3139 struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
3140 				   struct ib_phys_buf *buf_list,
3141 				   int buf_cnt, int acc, u64 *iova_start)
3142 {
3143 	int status = -ENOMEM;
3144 	struct ocrdma_mr *mr;
3145 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
3146 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
3147 	u32 num_pbes;
3148 	u32 pbe_size = 0;
3149 
3150 	if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
3151 		return ERR_PTR(-EINVAL);
3152 
3153 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3154 	if (!mr)
3155 		return ERR_PTR(status);
3156 
3157 	num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
3158 	if (num_pbes == 0) {
3159 		status = -EINVAL;
3160 		goto pbl_err;
3161 	}
3162 	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
3163 	if (status)
3164 		goto pbl_err;
3165 
3166 	mr->hwmr.pbe_size = pbe_size;
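	/* fbo is the byte offset of iova_start within the first,
	 * page-aligned buffer; the PBEs themselves always carry
	 * page-aligned addresses.
	 */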
3167 	mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
3168 	mr->hwmr.va = *iova_start;
3169 	mr->hwmr.local_rd = 1;
3170 	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3171 	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3172 	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3173 	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3174 	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
3175 
3176 	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
3177 	if (status)
3178 		goto pbl_err;
3179 	build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table,
3180 			  &mr->hwmr);
3181 	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
3182 	if (status)
3183 		goto mbx_err;
3184 
3185 	mr->ibmr.lkey = mr->hwmr.lkey;
3186 	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
3187 		mr->ibmr.rkey = mr->hwmr.lkey;
3188 	return &mr->ibmr;
3189 
3190 mbx_err:
3191 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
3192 pbl_err:
3193 	kfree(mr);
3194 	return ERR_PTR(status);
3195 }
3196