1 /* This file is part of the Emulex RoCE Device Driver for
2  * RoCE (RDMA over Converged Ethernet) adapters.
3  * Copyright (C) 2012-2015 Emulex. All rights reserved.
4  * EMULEX and SLI are trademarks of Emulex.
5  * www.emulex.com
6  *
7  * This software is available to you under a choice of one of two licenses.
8  * You may choose to be licensed under the terms of the GNU General Public
9  * License (GPL) Version 2, available from the file COPYING in the main
10  * directory of this source tree, or the BSD license below:
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  *
16  * - Redistributions of source code must retain the above copyright notice,
17  *   this list of conditions and the following disclaimer.
18  *
19  * - Redistributions in binary form must reproduce the above copyright
20  *   notice, this list of conditions and the following disclaimer in
21  *   the documentation and/or other materials provided with the distribution.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  *
35  * Contact Information:
36  * linux-drivers@emulex.com
37  *
38  * Emulex
39  * 3333 Susan Street
40  * Costa Mesa, CA 92626
41  */
42 
43 #include <linux/sched.h>
44 #include <linux/interrupt.h>
45 #include <linux/log2.h>
46 #include <linux/dma-mapping.h>
47 #include <linux/if_ether.h>
48 
49 #include <rdma/ib_verbs.h>
50 #include <rdma/ib_user_verbs.h>
51 #include <rdma/ib_cache.h>
52 
53 #include "ocrdma.h"
54 #include "ocrdma_hw.h"
55 #include "ocrdma_verbs.h"
56 #include "ocrdma_ah.h"
57 
58 enum mbx_status {
59 	OCRDMA_MBX_STATUS_FAILED		= 1,
60 	OCRDMA_MBX_STATUS_ILLEGAL_FIELD		= 3,
61 	OCRDMA_MBX_STATUS_OOR			= 100,
62 	OCRDMA_MBX_STATUS_INVALID_PD		= 101,
63 	OCRDMA_MBX_STATUS_PD_INUSE		= 102,
64 	OCRDMA_MBX_STATUS_INVALID_CQ		= 103,
65 	OCRDMA_MBX_STATUS_INVALID_QP		= 104,
66 	OCRDMA_MBX_STATUS_INVALID_LKEY		= 105,
67 	OCRDMA_MBX_STATUS_ORD_EXCEEDS		= 106,
68 	OCRDMA_MBX_STATUS_IRD_EXCEEDS		= 107,
69 	OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS	= 108,
70 	OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS	= 109,
71 	OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS	= 110,
72 	OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS	= 111,
73 	OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS	= 112,
74 	OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE	= 113,
75 	OCRDMA_MBX_STATUS_MW_BOUND		= 114,
76 	OCRDMA_MBX_STATUS_INVALID_VA		= 115,
77 	OCRDMA_MBX_STATUS_INVALID_LENGTH	= 116,
78 	OCRDMA_MBX_STATUS_INVALID_FBO		= 117,
79 	OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS	= 118,
80 	OCRDMA_MBX_STATUS_INVALID_PBE_SIZE	= 119,
81 	OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY	= 120,
82 	OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT	= 121,
83 	OCRDMA_MBX_STATUS_INVALID_SRQ_ID	= 129,
84 	OCRDMA_MBX_STATUS_SRQ_ERROR		= 133,
85 	OCRDMA_MBX_STATUS_RQE_EXCEEDS		= 134,
86 	OCRDMA_MBX_STATUS_MTU_EXCEEDS		= 135,
87 	OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS	= 136,
88 	OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS	= 137,
89 	OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS	= 138,
90 	OCRDMA_MBX_STATUS_QP_BOUND		= 130,
91 	OCRDMA_MBX_STATUS_INVALID_CHANGE	= 139,
92 	OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP	= 140,
93 	OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER	= 141,
94 	OCRDMA_MBX_STATUS_MW_STILL_BOUND	= 142,
95 	OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID	= 143,
96 	OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS	= 144
97 };
98 
99 enum additional_status {
100 	OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22
101 };
102 
103 enum cqe_status {
104 	OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES	= 1,
105 	OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER		= 2,
106 	OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES	= 3,
107 	OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING		= 4,
108 	OCRDMA_MBX_CQE_STATUS_DMA_FAILED		= 5
109 };
110 
111 static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
112 {
113 	return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
114 }
115 
116 static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
117 {
118 	eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);
119 }
120 
121 static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
122 {
123 	struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
124 	    (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
125 
126 	if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
127 		return NULL;
128 	return cqe;
129 }
130 
131 static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
132 {
133 	dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
134 }
135 
136 static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
137 {
138 	return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
139 }
140 
141 static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
142 {
143 	dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
144 }
145 
146 static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
147 {
148 	return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
149 }
150 
151 enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
152 {
153 	switch (qps) {
154 	case OCRDMA_QPS_RST:
155 		return IB_QPS_RESET;
156 	case OCRDMA_QPS_INIT:
157 		return IB_QPS_INIT;
158 	case OCRDMA_QPS_RTR:
159 		return IB_QPS_RTR;
160 	case OCRDMA_QPS_RTS:
161 		return IB_QPS_RTS;
162 	case OCRDMA_QPS_SQD:
163 	case OCRDMA_QPS_SQ_DRAINING:
164 		return IB_QPS_SQD;
165 	case OCRDMA_QPS_SQE:
166 		return IB_QPS_SQE;
167 	case OCRDMA_QPS_ERR:
168 		return IB_QPS_ERR;
169 	}
170 	return IB_QPS_ERR;
171 }
172 
173 static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
174 {
175 	switch (qps) {
176 	case IB_QPS_RESET:
177 		return OCRDMA_QPS_RST;
178 	case IB_QPS_INIT:
179 		return OCRDMA_QPS_INIT;
180 	case IB_QPS_RTR:
181 		return OCRDMA_QPS_RTR;
182 	case IB_QPS_RTS:
183 		return OCRDMA_QPS_RTS;
184 	case IB_QPS_SQD:
185 		return OCRDMA_QPS_SQD;
186 	case IB_QPS_SQE:
187 		return OCRDMA_QPS_SQE;
188 	case IB_QPS_ERR:
189 		return OCRDMA_QPS_ERR;
190 	}
191 	return OCRDMA_QPS_ERR;
192 }
193 
194 static int ocrdma_get_mbx_errno(u32 status)
195 {
196 	int err_num;
197 	u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
198 					OCRDMA_MBX_RSP_STATUS_SHIFT;
199 	u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
200 					OCRDMA_MBX_RSP_ASTATUS_SHIFT;
201 
202 	switch (mbox_status) {
203 	case OCRDMA_MBX_STATUS_OOR:
204 	case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS:
205 		err_num = -EAGAIN;
206 		break;
207 
208 	case OCRDMA_MBX_STATUS_INVALID_PD:
209 	case OCRDMA_MBX_STATUS_INVALID_CQ:
210 	case OCRDMA_MBX_STATUS_INVALID_SRQ_ID:
211 	case OCRDMA_MBX_STATUS_INVALID_QP:
212 	case OCRDMA_MBX_STATUS_INVALID_CHANGE:
213 	case OCRDMA_MBX_STATUS_MTU_EXCEEDS:
214 	case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER:
215 	case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID:
216 	case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS:
217 	case OCRDMA_MBX_STATUS_ILLEGAL_FIELD:
218 	case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY:
219 	case OCRDMA_MBX_STATUS_INVALID_LKEY:
220 	case OCRDMA_MBX_STATUS_INVALID_VA:
221 	case OCRDMA_MBX_STATUS_INVALID_LENGTH:
222 	case OCRDMA_MBX_STATUS_INVALID_FBO:
223 	case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS:
224 	case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE:
225 	case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP:
226 	case OCRDMA_MBX_STATUS_SRQ_ERROR:
227 	case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS:
228 		err_num = -EINVAL;
229 		break;
230 
231 	case OCRDMA_MBX_STATUS_PD_INUSE:
232 	case OCRDMA_MBX_STATUS_QP_BOUND:
233 	case OCRDMA_MBX_STATUS_MW_STILL_BOUND:
234 	case OCRDMA_MBX_STATUS_MW_BOUND:
235 		err_num = -EBUSY;
236 		break;
237 
238 	case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS:
239 	case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS:
240 	case OCRDMA_MBX_STATUS_RQE_EXCEEDS:
241 	case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS:
242 	case OCRDMA_MBX_STATUS_ORD_EXCEEDS:
243 	case OCRDMA_MBX_STATUS_IRD_EXCEEDS:
244 	case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS:
245 	case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS:
246 	case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS:
247 		err_num = -ENOBUFS;
248 		break;
249 
250 	case OCRDMA_MBX_STATUS_FAILED:
251 		switch (add_status) {
252 		case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
253 			err_num = -EAGAIN;
254 			break;
255 		default:
256 			err_num = -EFAULT;
257 		}
258 		break;
259 	default:
260 		err_num = -EFAULT;
261 	}
262 	return err_num;
263 }
264 
265 char *port_speed_string(struct ocrdma_dev *dev)
266 {
267 	char *str = "";
268 	u16 speeds_supported;
269 
270 	speeds_supported = dev->phy.fixed_speeds_supported |
271 				dev->phy.auto_speeds_supported;
272 	if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS)
273 		str = "40Gbps ";
274 	else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS)
275 		str = "10Gbps ";
276 	else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS)
277 		str = "1Gbps ";
278 
279 	return str;
280 }
281 
282 static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
283 {
284 	int err_num = -EINVAL;
285 
286 	switch (cqe_status) {
287 	case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES:
288 		err_num = -EPERM;
289 		break;
290 	case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER:
291 		err_num = -EINVAL;
292 		break;
293 	case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
294 	case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
295 		err_num = -EINVAL;
296 		break;
297 	case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
298 	default:
299 		err_num = -EINVAL;
300 		break;
301 	}
302 	return err_num;
303 }
304 
305 void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed,
306 		       bool solicited, u16 cqe_popped)
307 {
308 	u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK;
309 
310 	val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<
311 	     OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT);
312 
313 	if (armed)
314 		val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT);
315 	if (solicited)
316 		val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT);
317 	val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT);
318 	iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET);
319 }
320 
321 static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)
322 {
323 	u32 val = 0;
324 
325 	val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
326 	val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT;
327 	iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET);
328 }
329 
330 static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,
331 			      bool arm, bool clear_int, u16 num_eqe)
332 {
333 	u32 val = 0;
334 
335 	val |= eq_id & OCRDMA_EQ_ID_MASK;
336 	val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT);
337 	if (arm)
338 		val |= (1 << OCRDMA_REARM_SHIFT);
339 	if (clear_int)
340 		val |= (1 << OCRDMA_EQ_CLR_SHIFT);
341 	val |= (1 << OCRDMA_EQ_TYPE_SHIFT);
342 	val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT);
343 	iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET);
344 }
345 
346 static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr,
347 			    u8 opcode, u8 subsys, u32 cmd_len)
348 {
349 	cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT));
350 	cmd_hdr->timeout = 20; /* seconds */
351 	cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr);
352 }
353 
354 static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
355 {
356 	struct ocrdma_mqe *mqe;
357 
358 	mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
359 	if (!mqe)
360 		return NULL;
361 	mqe->hdr.spcl_sge_cnt_emb |=
362 		(OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) &
363 					OCRDMA_MQE_HDR_EMB_MASK;
364 	mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr);
365 
366 	ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE,
367 			mqe->hdr.pyld_len);
368 	return mqe;
369 }
370 
371 static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
372 {
373 	dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
374 }
375 
376 static int ocrdma_alloc_q(struct ocrdma_dev *dev,
377 			  struct ocrdma_queue_info *q, u16 len, u16 entry_size)
378 {
379 	memset(q, 0, sizeof(*q));
380 	q->len = len;
381 	q->entry_size = entry_size;
382 	q->size = len * entry_size;
383 	q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size,
384 				    &q->dma, GFP_KERNEL);
385 	if (!q->va)
386 		return -ENOMEM;
387 	return 0;
388 }
389 
390 static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
391 					dma_addr_t host_pa, int hw_page_size)
392 {
393 	int i;
394 
395 	for (i = 0; i < cnt; i++) {
396 		q_pa[i].lo = (u32) (host_pa & 0xffffffff);
397 		q_pa[i].hi = (u32) upper_32_bits(host_pa);
398 		host_pa += hw_page_size;
399 	}
400 }
401 
402 static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev,
403 			       struct ocrdma_queue_info *q, int queue_type)
404 {
405 	u8 opcode = 0;
406 	int status;
407 	struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;
408 
409 	switch (queue_type) {
410 	case QTYPE_MCCQ:
411 		opcode = OCRDMA_CMD_DELETE_MQ;
412 		break;
413 	case QTYPE_CQ:
414 		opcode = OCRDMA_CMD_DELETE_CQ;
415 		break;
416 	case QTYPE_EQ:
417 		opcode = OCRDMA_CMD_DELETE_EQ;
418 		break;
419 	default:
420 		BUG();
421 	}
422 	memset(cmd, 0, sizeof(*cmd));
423 	ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
424 	cmd->id = q->id;
425 
426 	status = be_roce_mcc_cmd(dev->nic_info.netdev,
427 				 cmd, sizeof(*cmd), NULL, NULL);
428 	if (!status)
429 		q->created = false;
430 	return status;
431 }
432 
433 static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
434 {
435 	int status;
436 	struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;
437 	struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;
438 
439 	memset(cmd, 0, sizeof(*cmd));
440 	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
441 			sizeof(*cmd));
442 
443 	cmd->req.rsvd_version = 2;
444 	cmd->num_pages = 4;
445 	cmd->valid = OCRDMA_CREATE_EQ_VALID;
446 	cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;
447 
448 	ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
449 			     PAGE_SIZE_4K);
450 	status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,
451 				 NULL);
452 	if (!status) {
453 		eq->q.id = rsp->vector_eqid & 0xffff;
454 		eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
455 		eq->q.created = true;
456 	}
457 	return status;
458 }
459 
460 static int ocrdma_create_eq(struct ocrdma_dev *dev,
461 			    struct ocrdma_eq *eq, u16 q_len)
462 {
463 	int status;
464 
465 	status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
466 				sizeof(struct ocrdma_eqe));
467 	if (status)
468 		return status;
469 
470 	status = ocrdma_mbx_create_eq(dev, eq);
471 	if (status)
472 		goto mbx_err;
473 	eq->dev = dev;
474 	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
475 
476 	return 0;
477 mbx_err:
478 	ocrdma_free_q(dev, &eq->q);
479 	return status;
480 }
481 
482 int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
483 {
484 	int irq;
485 
486 	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
487 		irq = dev->nic_info.pdev->irq;
488 	else
489 		irq = dev->nic_info.msix.vector_list[eq->vector];
490 	return irq;
491 }
492 
493 static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
494 {
495 	if (eq->q.created) {
496 		ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
497 		ocrdma_free_q(dev, &eq->q);
498 	}
499 }
500 
501 static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
502 {
503 	int irq;
504 
505 	/* disarm EQ so that interrupts are not generated
506 	 * during freeing and EQ delete is in progress.
507 	 */
508 	ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);
509 
510 	irq = ocrdma_get_irq(dev, eq);
511 	free_irq(irq, eq);
512 	_ocrdma_destroy_eq(dev, eq);
513 }
514 
515 static void ocrdma_destroy_eqs(struct ocrdma_dev *dev)
516 {
517 	int i;
518 
519 	for (i = 0; i < dev->eq_cnt; i++)
520 		ocrdma_destroy_eq(dev, &dev->eq_tbl[i]);
521 }
522 
523 static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
524 				   struct ocrdma_queue_info *cq,
525 				   struct ocrdma_queue_info *eq)
526 {
527 	struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd;
528 	struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd;
529 	int status;
530 
531 	memset(cmd, 0, sizeof(*cmd));
532 	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
533 			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
534 
535 	cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
536 	cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) <<
537 		OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
538 	cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size);
539 
540 	cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
541 	cmd->eqn = eq->id;
542 	cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe);
543 
544 	ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
545 			     cq->dma, PAGE_SIZE_4K);
546 	status = be_roce_mcc_cmd(dev->nic_info.netdev,
547 				 cmd, sizeof(*cmd), NULL, NULL);
548 	if (!status) {
549 		cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
550 		cq->created = true;
551 	}
552 	return status;
553 }
554 
555 static u32 ocrdma_encoded_q_len(int q_len)
556 {
557 	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */
558 
559 	if (len_encoded == 16)
560 		len_encoded = 0;
561 	return len_encoded;
562 }
563 
564 static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
565 				struct ocrdma_queue_info *mq,
566 				struct ocrdma_queue_info *cq)
567 {
568 	int num_pages, status;
569 	struct ocrdma_create_mq_req *cmd = dev->mbx_cmd;
570 	struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd;
571 	struct ocrdma_pa *pa;
572 
573 	memset(cmd, 0, sizeof(*cmd));
574 	num_pages = PAGES_4K_SPANNED(mq->va, mq->size);
575 
576 	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
577 			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
578 	cmd->req.rsvd_version = 1;
579 	cmd->cqid_pages = num_pages;
580 	cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
581 	cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
582 
583 	cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
584 	cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);
585 	/* Request link events on this  MQ. */
586 	cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_LINK_EVE_CODE);
587 
588 	cmd->async_cqid_ringsize = cq->id;
589 	cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
590 				OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
591 	cmd->valid = OCRDMA_CREATE_MQ_VALID;
592 	pa = &cmd->pa[0];
593 
594 	ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
595 	status = be_roce_mcc_cmd(dev->nic_info.netdev,
596 				 cmd, sizeof(*cmd), NULL, NULL);
597 	if (!status) {
598 		mq->id = rsp->id;
599 		mq->created = true;
600 	}
601 	return status;
602 }
603 
604 static int ocrdma_create_mq(struct ocrdma_dev *dev)
605 {
606 	int status;
607 
608 	/* Alloc completion queue for Mailbox queue */
609 	status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,
610 				sizeof(struct ocrdma_mcqe));
611 	if (status)
612 		goto alloc_err;
613 
614 	dev->eq_tbl[0].cq_cnt++;
615 	status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);
616 	if (status)
617 		goto mbx_cq_free;
618 
619 	memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx));
620 	init_waitqueue_head(&dev->mqe_ctx.cmd_wait);
621 	mutex_init(&dev->mqe_ctx.lock);
622 
623 	/* Alloc Mailbox queue */
624 	status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
625 				sizeof(struct ocrdma_mqe));
626 	if (status)
627 		goto mbx_cq_destroy;
628 	status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
629 	if (status)
630 		goto mbx_q_free;
631 	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);
632 	return 0;
633 
634 mbx_q_free:
635 	ocrdma_free_q(dev, &dev->mq.sq);
636 mbx_cq_destroy:
637 	ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
638 mbx_cq_free:
639 	ocrdma_free_q(dev, &dev->mq.cq);
640 alloc_err:
641 	return status;
642 }
643 
644 static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
645 {
646 	struct ocrdma_queue_info *mbxq, *cq;
647 
648 	/* mqe_ctx lock synchronizes with any other pending cmds. */
649 	mutex_lock(&dev->mqe_ctx.lock);
650 	mbxq = &dev->mq.sq;
651 	if (mbxq->created) {
652 		ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
653 		ocrdma_free_q(dev, mbxq);
654 	}
655 	mutex_unlock(&dev->mqe_ctx.lock);
656 
657 	cq = &dev->mq.cq;
658 	if (cq->created) {
659 		ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
660 		ocrdma_free_q(dev, cq);
661 	}
662 }
663 
664 static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
665 				       struct ocrdma_qp *qp)
666 {
667 	enum ib_qp_state new_ib_qps = IB_QPS_ERR;
668 	enum ib_qp_state old_ib_qps;
669 
670 	if (qp == NULL)
671 		BUG();
672 	ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps);
673 }
674 
675 static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
676 				    struct ocrdma_ae_mcqe *cqe)
677 {
678 	struct ocrdma_qp *qp = NULL;
679 	struct ocrdma_cq *cq = NULL;
680 	struct ib_event ib_evt;
681 	int cq_event = 0;
682 	int qp_event = 1;
683 	int srq_event = 0;
684 	int dev_event = 0;
685 	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
686 	    OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
687 	u16 qpid = cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK;
688 	u16 cqid = cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK;
689 
690 	/*
691 	 * Some FW version returns wrong qp or cq ids in CQEs.
692 	 * Checking whether the IDs are valid
693 	 */
694 
695 	if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID) {
696 		if (qpid < dev->attr.max_qp)
697 			qp = dev->qp_tbl[qpid];
698 		if (qp == NULL) {
699 			pr_err("ocrdma%d:Async event - qpid %u is not valid\n",
700 			       dev->id, qpid);
701 			return;
702 		}
703 	}
704 
705 	if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) {
706 		if (cqid < dev->attr.max_cq)
707 			cq = dev->cq_tbl[cqid];
708 		if (cq == NULL) {
709 			pr_err("ocrdma%d:Async event - cqid %u is not valid\n",
710 			       dev->id, cqid);
711 			return;
712 		}
713 	}
714 
715 	memset(&ib_evt, 0, sizeof(ib_evt));
716 
717 	ib_evt.device = &dev->ibdev;
718 
719 	switch (type) {
720 	case OCRDMA_CQ_ERROR:
721 		ib_evt.element.cq = &cq->ibcq;
722 		ib_evt.event = IB_EVENT_CQ_ERR;
723 		cq_event = 1;
724 		qp_event = 0;
725 		break;
726 	case OCRDMA_CQ_OVERRUN_ERROR:
727 		ib_evt.element.cq = &cq->ibcq;
728 		ib_evt.event = IB_EVENT_CQ_ERR;
729 		cq_event = 1;
730 		qp_event = 0;
731 		break;
732 	case OCRDMA_CQ_QPCAT_ERROR:
733 		ib_evt.element.qp = &qp->ibqp;
734 		ib_evt.event = IB_EVENT_QP_FATAL;
735 		ocrdma_process_qpcat_error(dev, qp);
736 		break;
737 	case OCRDMA_QP_ACCESS_ERROR:
738 		ib_evt.element.qp = &qp->ibqp;
739 		ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
740 		break;
741 	case OCRDMA_QP_COMM_EST_EVENT:
742 		ib_evt.element.qp = &qp->ibqp;
743 		ib_evt.event = IB_EVENT_COMM_EST;
744 		break;
745 	case OCRDMA_SQ_DRAINED_EVENT:
746 		ib_evt.element.qp = &qp->ibqp;
747 		ib_evt.event = IB_EVENT_SQ_DRAINED;
748 		break;
749 	case OCRDMA_DEVICE_FATAL_EVENT:
750 		ib_evt.element.port_num = 1;
751 		ib_evt.event = IB_EVENT_DEVICE_FATAL;
752 		qp_event = 0;
753 		dev_event = 1;
754 		break;
755 	case OCRDMA_SRQCAT_ERROR:
756 		ib_evt.element.srq = &qp->srq->ibsrq;
757 		ib_evt.event = IB_EVENT_SRQ_ERR;
758 		srq_event = 1;
759 		qp_event = 0;
760 		break;
761 	case OCRDMA_SRQ_LIMIT_EVENT:
762 		ib_evt.element.srq = &qp->srq->ibsrq;
763 		ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
764 		srq_event = 1;
765 		qp_event = 0;
766 		break;
767 	case OCRDMA_QP_LAST_WQE_EVENT:
768 		ib_evt.element.qp = &qp->ibqp;
769 		ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
770 		break;
771 	default:
772 		cq_event = 0;
773 		qp_event = 0;
774 		srq_event = 0;
775 		dev_event = 0;
776 		pr_err("%s() unknown type=0x%x\n", __func__, type);
777 		break;
778 	}
779 
780 	if (type < OCRDMA_MAX_ASYNC_ERRORS)
781 		atomic_inc(&dev->async_err_stats[type]);
782 
783 	if (qp_event) {
784 		if (qp->ibqp.event_handler)
785 			qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
786 	} else if (cq_event) {
787 		if (cq->ibcq.event_handler)
788 			cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
789 	} else if (srq_event) {
790 		if (qp->srq->ibsrq.event_handler)
791 			qp->srq->ibsrq.event_handler(&ib_evt,
792 						     qp->srq->ibsrq.
793 						     srq_context);
794 	} else if (dev_event) {
795 		pr_err("%s: Fatal event received\n", dev->ibdev.name);
796 		ib_dispatch_event(&ib_evt);
797 	}
798 
799 }
800 
801 static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
802 					struct ocrdma_ae_mcqe *cqe)
803 {
804 	struct ocrdma_ae_pvid_mcqe *evt;
805 	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
806 			OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
807 
808 	switch (type) {
809 	case OCRDMA_ASYNC_EVENT_PVID_STATE:
810 		evt = (struct ocrdma_ae_pvid_mcqe *)cqe;
811 		if ((evt->tag_enabled & OCRDMA_AE_PVID_MCQE_ENABLED_MASK) >>
812 			OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT)
813 			dev->pvid = ((evt->tag_enabled &
814 					OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
815 					OCRDMA_AE_PVID_MCQE_TAG_SHIFT);
816 		break;
817 
818 	case OCRDMA_ASYNC_EVENT_COS_VALUE:
819 		atomic_set(&dev->update_sl, 1);
820 		break;
821 	default:
822 		/* Not interested evts. */
823 		break;
824 	}
825 }
826 
827 static void ocrdma_process_link_state(struct ocrdma_dev *dev,
828 				      struct ocrdma_ae_mcqe *cqe)
829 {
830 	struct ocrdma_ae_lnkst_mcqe *evt;
831 	u8 lstate;
832 
833 	evt = (struct ocrdma_ae_lnkst_mcqe *)cqe;
834 	lstate = ocrdma_get_ae_link_state(evt->speed_state_ptn);
835 
836 	if (!(lstate & OCRDMA_AE_LSC_LLINK_MASK))
837 		return;
838 
839 	if (dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)
840 		ocrdma_update_link_state(dev, (lstate & OCRDMA_LINK_ST_MASK));
841 }
842 
843 static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
844 {
845 	/* async CQE processing */
846 	struct ocrdma_ae_mcqe *cqe = ae_cqe;
847 	u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
848 			OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
849 	switch (evt_code) {
850 	case OCRDMA_ASYNC_LINK_EVE_CODE:
851 		ocrdma_process_link_state(dev, cqe);
852 		break;
853 	case OCRDMA_ASYNC_RDMA_EVE_CODE:
854 		ocrdma_dispatch_ibevent(dev, cqe);
855 		break;
856 	case OCRDMA_ASYNC_GRP5_EVE_CODE:
857 		ocrdma_process_grp5_aync(dev, cqe);
858 		break;
859 	default:
860 		pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
861 		       dev->id, evt_code);
862 	}
863 }
864 
865 static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
866 {
867 	if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
868 		dev->mqe_ctx.cqe_status = (cqe->status &
869 		     OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT;
870 		dev->mqe_ctx.ext_status =
871 		    (cqe->status & OCRDMA_MCQE_ESTATUS_MASK)
872 		    >> OCRDMA_MCQE_ESTATUS_SHIFT;
873 		dev->mqe_ctx.cmd_done = true;
874 		wake_up(&dev->mqe_ctx.cmd_wait);
875 	} else
876 		pr_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
877 		       __func__, cqe->tag_lo, dev->mqe_ctx.tag);
878 }
879 
880 static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
881 {
882 	u16 cqe_popped = 0;
883 	struct ocrdma_mcqe *cqe;
884 
885 	while (1) {
886 		cqe = ocrdma_get_mcqe(dev);
887 		if (cqe == NULL)
888 			break;
889 		ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
890 		cqe_popped += 1;
891 		if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
892 			ocrdma_process_acqe(dev, cqe);
893 		else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
894 			ocrdma_process_mcqe(dev, cqe);
895 		memset(cqe, 0, sizeof(struct ocrdma_mcqe));
896 		ocrdma_mcq_inc_tail(dev);
897 	}
898 	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
899 	return 0;
900 }
901 
902 static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
903 				struct ocrdma_cq *cq, bool sq)
904 {
905 	struct ocrdma_qp *qp;
906 	struct list_head *cur;
907 	struct ocrdma_cq *bcq = NULL;
908 	struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head);
909 
910 	list_for_each(cur, head) {
911 		if (sq)
912 			qp = list_entry(cur, struct ocrdma_qp, sq_entry);
913 		else
914 			qp = list_entry(cur, struct ocrdma_qp, rq_entry);
915 
916 		if (qp->srq)
917 			continue;
918 		/* if wq and rq share the same cq, than comp_handler
919 		 * is already invoked.
920 		 */
921 		if (qp->sq_cq == qp->rq_cq)
922 			continue;
923 		/* if completion came on sq, rq's cq is buddy cq.
924 		 * if completion came on rq, sq's cq is buddy cq.
925 		 */
926 		if (qp->sq_cq == cq)
927 			bcq = qp->rq_cq;
928 		else
929 			bcq = qp->sq_cq;
930 		return bcq;
931 	}
932 	return NULL;
933 }
934 
935 static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
936 				       struct ocrdma_cq *cq)
937 {
938 	unsigned long flags;
939 	struct ocrdma_cq *bcq = NULL;
940 
941 	/* Go through list of QPs in error state which are using this CQ
942 	 * and invoke its callback handler to trigger CQE processing for
943 	 * error/flushed CQE. It is rare to find more than few entries in
944 	 * this list as most consumers stops after getting error CQE.
945 	 * List is traversed only once when a matching buddy cq found for a QP.
946 	 */
947 	spin_lock_irqsave(&dev->flush_q_lock, flags);
948 	/* Check if buddy CQ is present.
949 	 * true - Check for  SQ CQ
950 	 * false - Check for RQ CQ
951 	 */
952 	bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
953 	if (bcq == NULL)
954 		bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
955 	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
956 
957 	/* if there is valid buddy cq, look for its completion handler */
958 	if (bcq && bcq->ibcq.comp_handler) {
959 		spin_lock_irqsave(&bcq->comp_handler_lock, flags);
960 		(*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
961 		spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
962 	}
963 }
964 
965 static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
966 {
967 	unsigned long flags;
968 	struct ocrdma_cq *cq;
969 
970 	if (cq_idx >= OCRDMA_MAX_CQ)
971 		BUG();
972 
973 	cq = dev->cq_tbl[cq_idx];
974 	if (cq == NULL)
975 		return;
976 
977 	if (cq->ibcq.comp_handler) {
978 		spin_lock_irqsave(&cq->comp_handler_lock, flags);
979 		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
980 		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
981 	}
982 	ocrdma_qp_buddy_cq_handler(dev, cq);
983 }
984 
985 static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
986 {
987 	/* process the MQ-CQE. */
988 	if (cq_id == dev->mq.cq.id)
989 		ocrdma_mq_cq_handler(dev, cq_id);
990 	else
991 		ocrdma_qp_cq_handler(dev, cq_id);
992 }
993 
994 static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
995 {
996 	struct ocrdma_eq *eq = handle;
997 	struct ocrdma_dev *dev = eq->dev;
998 	struct ocrdma_eqe eqe;
999 	struct ocrdma_eqe *ptr;
1000 	u16 cq_id;
1001 	u8 mcode;
1002 	int budget = eq->cq_cnt;
1003 
1004 	do {
1005 		ptr = ocrdma_get_eqe(eq);
1006 		eqe = *ptr;
1007 		ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
1008 		mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK)
1009 				>> OCRDMA_EQE_MAJOR_CODE_SHIFT;
1010 		if (mcode == OCRDMA_MAJOR_CODE_SENTINAL)
1011 			pr_err("EQ full on eqid = 0x%x, eqe = 0x%x\n",
1012 			       eq->q.id, eqe.id_valid);
1013 		if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
1014 			break;
1015 
1016 		ptr->id_valid = 0;
1017 		/* ring eq doorbell as soon as its consumed. */
1018 		ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1);
1019 		/* check whether its CQE or not. */
1020 		if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
1021 			cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
1022 			ocrdma_cq_handler(dev, cq_id);
1023 		}
1024 		ocrdma_eq_inc_tail(eq);
1025 
1026 		/* There can be a stale EQE after the last bound CQ is
1027 		 * destroyed. EQE valid and budget == 0 implies this.
1028 		 */
1029 		if (budget)
1030 			budget--;
1031 
1032 	} while (budget);
1033 
1034 	eq->aic_obj.eq_intr_cnt++;
1035 	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
1036 	return IRQ_HANDLED;
1037 }
1038 
1039 static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
1040 {
1041 	struct ocrdma_mqe *mqe;
1042 
1043 	dev->mqe_ctx.tag = dev->mq.sq.head;
1044 	dev->mqe_ctx.cmd_done = false;
1045 	mqe = ocrdma_get_mqe(dev);
1046 	cmd->hdr.tag_lo = dev->mq.sq.head;
1047 	ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
1048 	/* make sure descriptor is written before ringing doorbell */
1049 	wmb();
1050 	ocrdma_mq_inc_head(dev);
1051 	ocrdma_ring_mq_db(dev);
1052 }
1053 
1054 static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
1055 {
1056 	long status;
1057 	/* 30 sec timeout */
1058 	status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
1059 				    (dev->mqe_ctx.cmd_done != false),
1060 				    msecs_to_jiffies(30000));
1061 	if (status)
1062 		return 0;
1063 	else {
1064 		dev->mqe_ctx.fw_error_state = true;
1065 		pr_err("%s(%d) mailbox timeout: fw not responding\n",
1066 		       __func__, dev->id);
1067 		return -1;
1068 	}
1069 }
1070 
1071 /* issue a mailbox command on the MQ */
1072 static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
1073 {
1074 	int status = 0;
1075 	u16 cqe_status, ext_status;
1076 	struct ocrdma_mqe *rsp_mqe;
1077 	struct ocrdma_mbx_rsp *rsp = NULL;
1078 
1079 	mutex_lock(&dev->mqe_ctx.lock);
1080 	if (dev->mqe_ctx.fw_error_state)
1081 		goto mbx_err;
1082 	ocrdma_post_mqe(dev, mqe);
1083 	status = ocrdma_wait_mqe_cmpl(dev);
1084 	if (status)
1085 		goto mbx_err;
1086 	cqe_status = dev->mqe_ctx.cqe_status;
1087 	ext_status = dev->mqe_ctx.ext_status;
1088 	rsp_mqe = ocrdma_get_mqe_rsp(dev);
1089 	ocrdma_copy_le32_to_cpu(mqe, rsp_mqe, (sizeof(*mqe)));
1090 	if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
1091 				OCRDMA_MQE_HDR_EMB_SHIFT)
1092 		rsp = &mqe->u.rsp;
1093 
1094 	if (cqe_status || ext_status) {
1095 		pr_err("%s() cqe_status=0x%x, ext_status=0x%x,\n",
1096 		       __func__, cqe_status, ext_status);
1097 		if (rsp) {
1098 			/* This is for embedded cmds. */
1099 			pr_err("opcode=0x%x, subsystem=0x%x\n",
1100 			       (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
1101 				OCRDMA_MBX_RSP_OPCODE_SHIFT,
1102 				(rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
1103 				OCRDMA_MBX_RSP_SUBSYS_SHIFT);
1104 		}
1105 		status = ocrdma_get_mbx_cqe_errno(cqe_status);
1106 		goto mbx_err;
1107 	}
1108 	/* For non embedded, rsp errors are handled in ocrdma_nonemb_mbx_cmd */
1109 	if (rsp && (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK))
1110 		status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
1111 mbx_err:
1112 	mutex_unlock(&dev->mqe_ctx.lock);
1113 	return status;
1114 }
1115 
1116 static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
1117 				 void *payload_va)
1118 {
1119 	int status;
1120 	struct ocrdma_mbx_rsp *rsp = payload_va;
1121 
1122 	if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
1123 				OCRDMA_MQE_HDR_EMB_SHIFT)
1124 		BUG();
1125 
1126 	status = ocrdma_mbx_cmd(dev, mqe);
1127 	if (!status)
1128 		/* For non embedded, only CQE failures are handled in
1129 		 * ocrdma_mbx_cmd. We need to check for RSP errors.
1130 		 */
1131 		if (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK)
1132 			status = ocrdma_get_mbx_errno(rsp->status);
1133 
1134 	if (status)
1135 		pr_err("opcode=0x%x, subsystem=0x%x\n",
1136 		       (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
1137 			OCRDMA_MBX_RSP_OPCODE_SHIFT,
1138 			(rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
1139 			OCRDMA_MBX_RSP_SUBSYS_SHIFT);
1140 	return status;
1141 }
1142 
1143 static void ocrdma_get_attr(struct ocrdma_dev *dev,
1144 			      struct ocrdma_dev_attr *attr,
1145 			      struct ocrdma_mbx_query_config *rsp)
1146 {
1147 	attr->max_pd =
1148 	    (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
1149 	    OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
1150 	attr->udp_encap = (rsp->max_pd_ca_ack_delay &
1151 			   OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK) >>
1152 			   OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT;
1153 	attr->max_dpp_pds =
1154 	   (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
1155 	    OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
1156 	attr->max_qp =
1157 	    (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
1158 	    OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
1159 	attr->max_srq =
1160 		(rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
1161 		OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
1162 	attr->max_send_sge = ((rsp->max_recv_send_sge &
1163 			       OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
1164 			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
1165 	attr->max_recv_sge = (rsp->max_recv_send_sge &
1166 			      OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK) >>
1167 	    OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT;
1168 	attr->max_srq_sge = (rsp->max_srq_rqe_sge &
1169 			      OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
1170 	    OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
1171 	attr->max_rdma_sge = (rsp->max_wr_rd_sge &
1172 			      OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK) >>
1173 	    OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT;
1174 	attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
1175 				OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
1176 	    OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
1177 	attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
1178 				OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
1179 	    OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
1180 	attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
1181 				    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >>
1182 	    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT;
1183 	attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
1184 			       OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >>
1185 	    OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT;
1186 	attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
1187 				    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
1188 	    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
1189 	attr->max_mw = rsp->max_mw;
1190 	attr->max_mr = rsp->max_mr;
1191 	attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
1192 			      rsp->max_mr_size_lo;
1193 	attr->max_fmr = 0;
1194 	attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
1195 	attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
1196 	attr->max_cqe = rsp->max_cq_cqes_per_cq &
1197 			OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
1198 	attr->max_cq = (rsp->max_cq_cqes_per_cq &
1199 			OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK) >>
1200 			OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET;
1201 	attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
1202 		OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
1203 		OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
1204 		OCRDMA_WQE_STRIDE;
1205 	attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
1206 		OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >>
1207 		OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) *
1208 		OCRDMA_WQE_STRIDE;
1209 	attr->max_inline_data =
1210 	    attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
1211 			      sizeof(struct ocrdma_sge));
1212 	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1213 		attr->ird = 1;
1214 		attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
1215 		attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
1216 	}
1217 	dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
1218 		 OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
1219 	dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
1220 		OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
1221 }
1222 
1223 static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
1224 				   struct ocrdma_fw_conf_rsp *conf)
1225 {
1226 	u32 fn_mode;
1227 
1228 	fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
1229 	if (fn_mode != OCRDMA_FN_MODE_RDMA)
1230 		return -EINVAL;
1231 	dev->base_eqid = conf->base_eqid;
1232 	dev->max_eq = conf->max_eq;
1233 	return 0;
1234 }
1235 
1236 /* can be issued only during init time. */
1237 static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
1238 {
1239 	int status = -ENOMEM;
1240 	struct ocrdma_mqe *cmd;
1241 	struct ocrdma_fw_ver_rsp *rsp;
1242 
1243 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
1244 	if (!cmd)
1245 		return -ENOMEM;
1246 	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
1247 			OCRDMA_CMD_GET_FW_VER,
1248 			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1249 
1250 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1251 	if (status)
1252 		goto mbx_err;
1253 	rsp = (struct ocrdma_fw_ver_rsp *)cmd;
1254 	memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
1255 	memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
1256 	       sizeof(rsp->running_ver));
1257 	ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
1258 mbx_err:
1259 	kfree(cmd);
1260 	return status;
1261 }
1262 
1263 /* can be issued only during init time. */
1264 static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
1265 {
1266 	int status = -ENOMEM;
1267 	struct ocrdma_mqe *cmd;
1268 	struct ocrdma_fw_conf_rsp *rsp;
1269 
1270 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
1271 	if (!cmd)
1272 		return -ENOMEM;
1273 	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
1274 			OCRDMA_CMD_GET_FW_CONFIG,
1275 			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1276 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1277 	if (status)
1278 		goto mbx_err;
1279 	rsp = (struct ocrdma_fw_conf_rsp *)cmd;
1280 	status = ocrdma_check_fw_config(dev, rsp);
1281 mbx_err:
1282 	kfree(cmd);
1283 	return status;
1284 }
1285 
1286 int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset)
1287 {
1288 	struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
1289 	struct ocrdma_mqe *mqe = &dev->stats_mem.mqe;
1290 	struct ocrdma_rdma_stats_resp *old_stats;
1291 	int status;
1292 
1293 	old_stats = kmalloc(sizeof(*old_stats), GFP_KERNEL);
1294 	if (old_stats == NULL)
1295 		return -ENOMEM;
1296 
1297 	memset(mqe, 0, sizeof(*mqe));
1298 	mqe->hdr.pyld_len = dev->stats_mem.size;
1299 	mqe->hdr.spcl_sge_cnt_emb |=
1300 			(1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
1301 				OCRDMA_MQE_HDR_SGE_CNT_MASK;
1302 	mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff);
1303 	mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa);
1304 	mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size;
1305 
1306 	/* Cache the old stats */
1307 	memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp));
1308 	memset(req, 0, dev->stats_mem.size);
1309 
1310 	ocrdma_init_mch((struct ocrdma_mbx_hdr *)req,
1311 			OCRDMA_CMD_GET_RDMA_STATS,
1312 			OCRDMA_SUBSYS_ROCE,
1313 			dev->stats_mem.size);
1314 	if (reset)
1315 		req->reset_stats = reset;
1316 
1317 	status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va);
1318 	if (status)
1319 		/* Copy from cache, if mbox fails */
1320 		memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp));
1321 	else
1322 		ocrdma_le32_to_cpu(req, dev->stats_mem.size);
1323 
1324 	kfree(old_stats);
1325 	return status;
1326 }
1327 
1328 static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
1329 {
1330 	int status = -ENOMEM;
1331 	struct ocrdma_dma_mem dma;
1332 	struct ocrdma_mqe *mqe;
1333 	struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp;
1334 	struct mgmt_hba_attribs *hba_attribs;
1335 
1336 	mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
1337 	if (!mqe)
1338 		return status;
1339 
1340 	dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp);
1341 	dma.va	 = dma_alloc_coherent(&dev->nic_info.pdev->dev,
1342 					dma.size, &dma.pa, GFP_KERNEL);
1343 	if (!dma.va)
1344 		goto free_mqe;
1345 
1346 	mqe->hdr.pyld_len = dma.size;
1347 	mqe->hdr.spcl_sge_cnt_emb |=
1348 			(1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
1349 			OCRDMA_MQE_HDR_SGE_CNT_MASK;
1350 	mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff);
1351 	mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa);
1352 	mqe->u.nonemb_req.sge[0].len = dma.size;
1353 
1354 	memset(dma.va, 0, dma.size);
1355 	ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va,
1356 			OCRDMA_CMD_GET_CTRL_ATTRIBUTES,
1357 			OCRDMA_SUBSYS_COMMON,
1358 			dma.size);
1359 
1360 	status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va);
1361 	if (!status) {
1362 		ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
1363 		hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;
1364 
1365 		dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
1366 					OCRDMA_HBA_ATTRB_PTNUM_MASK)
1367 					>> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
1368 		strncpy(dev->model_number,
1369 			hba_attribs->controller_model_number, 31);
1370 	}
1371 	dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);
1372 free_mqe:
1373 	kfree(mqe);
1374 	return status;
1375 }
1376 
1377 static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
1378 {
1379 	int status = -ENOMEM;
1380 	struct ocrdma_mbx_query_config *rsp;
1381 	struct ocrdma_mqe *cmd;
1382 
1383 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
1384 	if (!cmd)
1385 		return status;
1386 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1387 	if (status)
1388 		goto mbx_err;
1389 	rsp = (struct ocrdma_mbx_query_config *)cmd;
1390 	ocrdma_get_attr(dev, &dev->attr, rsp);
1391 mbx_err:
1392 	kfree(cmd);
1393 	return status;
1394 }
1395 
1396 int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
1397 			      u8 *lnk_state)
1398 {
1399 	int status = -ENOMEM;
1400 	struct ocrdma_get_link_speed_rsp *rsp;
1401 	struct ocrdma_mqe *cmd;
1402 
1403 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
1404 				  sizeof(*cmd));
1405 	if (!cmd)
1406 		return status;
1407 	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
1408 			OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
1409 			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1410 
1411 	((struct ocrdma_mbx_hdr *)cmd->u.cmd)->rsvd_version = 0x1;
1412 
1413 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1414 	if (status)
1415 		goto mbx_err;
1416 
1417 	rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
1418 	if (lnk_speed)
1419 		*lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
1420 			      >> OCRDMA_PHY_PS_SHIFT;
1421 	if (lnk_state)
1422 		*lnk_state = (rsp->res_lnk_st & OCRDMA_LINK_ST_MASK);
1423 
1424 mbx_err:
1425 	kfree(cmd);
1426 	return status;
1427 }
1428 
1429 static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
1430 {
1431 	int status = -ENOMEM;
1432 	struct ocrdma_mqe *cmd;
1433 	struct ocrdma_get_phy_info_rsp *rsp;
1434 
1435 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd));
1436 	if (!cmd)
1437 		return status;
1438 
1439 	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
1440 			OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON,
1441 			sizeof(*cmd));
1442 
1443 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1444 	if (status)
1445 		goto mbx_err;
1446 
1447 	rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
1448 	dev->phy.phy_type =
1449 			(rsp->ityp_ptyp & OCRDMA_PHY_TYPE_MASK);
1450 	dev->phy.interface_type =
1451 			(rsp->ityp_ptyp & OCRDMA_IF_TYPE_MASK)
1452 				>> OCRDMA_IF_TYPE_SHIFT;
1453 	dev->phy.auto_speeds_supported  =
1454 			(rsp->fspeed_aspeed & OCRDMA_ASPEED_SUPP_MASK);
1455 	dev->phy.fixed_speeds_supported =
1456 			(rsp->fspeed_aspeed & OCRDMA_FSPEED_SUPP_MASK)
1457 				>> OCRDMA_FSPEED_SUPP_SHIFT;
1458 mbx_err:
1459 	kfree(cmd);
1460 	return status;
1461 }
1462 
1463 int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
1464 {
1465 	int status = -ENOMEM;
1466 	struct ocrdma_alloc_pd *cmd;
1467 	struct ocrdma_alloc_pd_rsp *rsp;
1468 
1469 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
1470 	if (!cmd)
1471 		return status;
1472 	if (pd->dpp_enabled)
1473 		cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
1474 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1475 	if (status)
1476 		goto mbx_err;
1477 	rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
1478 	pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
1479 	if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) {
1480 		pd->dpp_enabled = true;
1481 		pd->dpp_page = rsp->dpp_page_pdid >>
1482 				OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
1483 	} else {
1484 		pd->dpp_enabled = false;
1485 		pd->num_dpp_qp = 0;
1486 	}
1487 mbx_err:
1488 	kfree(cmd);
1489 	return status;
1490 }
1491 
1492 int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
1493 {
1494 	int status = -ENOMEM;
1495 	struct ocrdma_dealloc_pd *cmd;
1496 
1497 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));
1498 	if (!cmd)
1499 		return status;
1500 	cmd->id = pd->id;
1501 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1502 	kfree(cmd);
1503 	return status;
1504 }
1505 
1506 
1507 static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
1508 {
1509 	int status = -ENOMEM;
1510 	size_t pd_bitmap_size;
1511 	struct ocrdma_alloc_pd_range *cmd;
1512 	struct ocrdma_alloc_pd_range_rsp *rsp;
1513 
1514 	/* Pre allocate the DPP PDs */
1515 	if (dev->attr.max_dpp_pds) {
1516 		cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE,
1517 					  sizeof(*cmd));
1518 		if (!cmd)
1519 			return -ENOMEM;
1520 		cmd->pd_count = dev->attr.max_dpp_pds;
1521 		cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
1522 		status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1523 		rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
1524 
1525 		if (!status && (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) &&
1526 		    rsp->pd_count) {
1527 			dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
1528 					OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
1529 			dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
1530 					OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
1531 			dev->pd_mgr->max_dpp_pd = rsp->pd_count;
1532 			pd_bitmap_size =
1533 				BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
1534 			dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
1535 							     GFP_KERNEL);
1536 		}
1537 		kfree(cmd);
1538 	}
1539 
1540 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
1541 	if (!cmd)
1542 		return -ENOMEM;
1543 
1544 	cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
1545 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1546 	rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
1547 	if (!status && rsp->pd_count) {
1548 		dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
1549 					OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
1550 		dev->pd_mgr->max_normal_pd = rsp->pd_count;
1551 		pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
1552 		dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
1553 						      GFP_KERNEL);
1554 	}
1555 	kfree(cmd);
1556 
1557 	if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
1558 		/* Enable PD resource manager */
1559 		dev->pd_mgr->pd_prealloc_valid = true;
1560 		return 0;
1561 	}
1562 	return status;
1563 }
1564 
1565 static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
1566 {
1567 	struct ocrdma_dealloc_pd_range *cmd;
1568 
1569 	/* return normal PDs to firmware */
1570 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
1571 	if (!cmd)
1572 		goto mbx_err;
1573 
1574 	if (dev->pd_mgr->max_normal_pd) {
1575 		cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
1576 		cmd->pd_count = dev->pd_mgr->max_normal_pd;
1577 		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1578 	}
1579 
1580 	if (dev->pd_mgr->max_dpp_pd) {
1581 		kfree(cmd);
1582 		/* return DPP PDs to firmware */
1583 		cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
1584 					  sizeof(*cmd));
1585 		if (!cmd)
1586 			goto mbx_err;
1587 
1588 		cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
1589 		cmd->pd_count = dev->pd_mgr->max_dpp_pd;
1590 		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1591 	}
1592 mbx_err:
1593 	kfree(cmd);
1594 }
1595 
1596 void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
1597 {
1598 	int status;
1599 
1600 	dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
1601 			      GFP_KERNEL);
1602 	if (!dev->pd_mgr)
1603 		return;
1604 
1605 	status = ocrdma_mbx_alloc_pd_range(dev);
1606 	if (status) {
1607 		pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
1608 			 __func__, dev->id);
1609 	}
1610 }
1611 
1612 static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
1613 {
1614 	ocrdma_mbx_dealloc_pd_range(dev);
1615 	kfree(dev->pd_mgr->pd_norm_bitmap);
1616 	kfree(dev->pd_mgr->pd_dpp_bitmap);
1617 	kfree(dev->pd_mgr);
1618 }
1619 
1620 static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
1621 			       int *num_pages, int *page_size)
1622 {
1623 	int i;
1624 	int mem_size;
1625 
1626 	*num_entries = roundup_pow_of_two(*num_entries);
1627 	mem_size = *num_entries * entry_size;
1628 	/* find the possible lowest possible multiplier */
1629 	for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
1630 		if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i))
1631 			break;
1632 	}
1633 	if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)
1634 		return -EINVAL;
1635 	mem_size = roundup(mem_size,
1636 		       ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES));
1637 	*num_pages =
1638 	    mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
1639 	*page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
1640 	*num_entries = mem_size / entry_size;
1641 	return 0;
1642 }
1643 
1644 static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
1645 {
1646 	int i;
1647 	int status = -ENOMEM;
1648 	int max_ah;
1649 	struct ocrdma_create_ah_tbl *cmd;
1650 	struct ocrdma_create_ah_tbl_rsp *rsp;
1651 	struct pci_dev *pdev = dev->nic_info.pdev;
1652 	dma_addr_t pa;
1653 	struct ocrdma_pbe *pbes;
1654 
1655 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd));
1656 	if (!cmd)
1657 		return status;
1658 
1659 	max_ah = OCRDMA_MAX_AH;
1660 	dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah;
1661 
1662 	/* number of PBEs in PBL */
1663 	cmd->ah_conf = (OCRDMA_AH_TBL_PAGES <<
1664 				OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) &
1665 				OCRDMA_CREATE_AH_NUM_PAGES_MASK;
1666 
1667 	/* page size */
1668 	for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
1669 		if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))
1670 			break;
1671 	}
1672 	cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) &
1673 				OCRDMA_CREATE_AH_PAGE_SIZE_MASK;
1674 
1675 	/* ah_entry size */
1676 	cmd->ah_conf |= (sizeof(struct ocrdma_av) <<
1677 				OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) &
1678 				OCRDMA_CREATE_AH_ENTRY_SIZE_MASK;
1679 
1680 	dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
1681 						&dev->av_tbl.pbl.pa,
1682 						GFP_KERNEL);
1683 	if (dev->av_tbl.pbl.va == NULL)
1684 		goto mem_err;
1685 
1686 	dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size,
1687 					    &pa, GFP_KERNEL);
1688 	if (dev->av_tbl.va == NULL)
1689 		goto mem_err_ah;
1690 	dev->av_tbl.pa = pa;
1691 	dev->av_tbl.num_ah = max_ah;
1692 	memset(dev->av_tbl.va, 0, dev->av_tbl.size);
1693 
1694 	pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
1695 	for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
1696 		pbes[i].pa_lo = (u32)cpu_to_le32(pa & 0xffffffff);
1697 		pbes[i].pa_hi = (u32)cpu_to_le32(upper_32_bits(pa));
1698 		pa += PAGE_SIZE;
1699 	}
1700 	cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
1701 	cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa);
1702 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1703 	if (status)
1704 		goto mbx_err;
1705 	rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd;
1706 	dev->av_tbl.ahid = rsp->ahid & 0xFFFF;
1707 	kfree(cmd);
1708 	return 0;
1709 
1710 mbx_err:
1711 	dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
1712 			  dev->av_tbl.pa);
1713 	dev->av_tbl.va = NULL;
1714 mem_err_ah:
1715 	dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
1716 			  dev->av_tbl.pbl.pa);
1717 	dev->av_tbl.pbl.va = NULL;
1718 	dev->av_tbl.size = 0;
1719 mem_err:
1720 	kfree(cmd);
1721 	return status;
1722 }
1723 
1724 static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
1725 {
1726 	struct ocrdma_delete_ah_tbl *cmd;
1727 	struct pci_dev *pdev = dev->nic_info.pdev;
1728 
1729 	if (dev->av_tbl.va == NULL)
1730 		return;
1731 
1732 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd));
1733 	if (!cmd)
1734 		return;
1735 	cmd->ahid = dev->av_tbl.ahid;
1736 
1737 	ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1738 	dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
1739 			  dev->av_tbl.pa);
1740 	dev->av_tbl.va = NULL;
1741 	dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
1742 			  dev->av_tbl.pbl.pa);
1743 	kfree(cmd);
1744 }
1745 
1746 /* Multiple CQs uses the EQ. This routine returns least used
1747  * EQ to associate with CQ. This will distributes the interrupt
1748  * processing and CPU load to associated EQ, vector and so to that CPU.
1749  */
1750 static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
1751 {
1752 	int i, selected_eq = 0, cq_cnt = 0;
1753 	u16 eq_id;
1754 
1755 	mutex_lock(&dev->dev_lock);
1756 	cq_cnt = dev->eq_tbl[0].cq_cnt;
1757 	eq_id = dev->eq_tbl[0].q.id;
1758 	/* find the EQ which is has the least number of
1759 	 * CQs associated with it.
1760 	 */
1761 	for (i = 0; i < dev->eq_cnt; i++) {
1762 		if (dev->eq_tbl[i].cq_cnt < cq_cnt) {
1763 			cq_cnt = dev->eq_tbl[i].cq_cnt;
1764 			eq_id = dev->eq_tbl[i].q.id;
1765 			selected_eq = i;
1766 		}
1767 	}
1768 	dev->eq_tbl[selected_eq].cq_cnt += 1;
1769 	mutex_unlock(&dev->dev_lock);
1770 	return eq_id;
1771 }
1772 
1773 static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
1774 {
1775 	int i;
1776 
1777 	mutex_lock(&dev->dev_lock);
1778 	i = ocrdma_get_eq_table_index(dev, eq_id);
1779 	if (i == -EINVAL)
1780 		BUG();
1781 	dev->eq_tbl[i].cq_cnt -= 1;
1782 	mutex_unlock(&dev->dev_lock);
1783 }
1784 
1785 int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
1786 			 int entries, int dpp_cq, u16 pd_id)
1787 {
1788 	int status = -ENOMEM; int max_hw_cqe;
1789 	struct pci_dev *pdev = dev->nic_info.pdev;
1790 	struct ocrdma_create_cq *cmd;
1791 	struct ocrdma_create_cq_rsp *rsp;
1792 	u32 hw_pages, cqe_size, page_size, cqe_count;
1793 
1794 	if (entries > dev->attr.max_cqe) {
1795 		pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
1796 		       __func__, dev->id, dev->attr.max_cqe, entries);
1797 		return -EINVAL;
1798 	}
1799 	if (dpp_cq && (ocrdma_get_asic_type(dev) != OCRDMA_ASIC_GEN_SKH_R))
1800 		return -EINVAL;
1801 
1802 	if (dpp_cq) {
1803 		cq->max_hw_cqe = 1;
1804 		max_hw_cqe = 1;
1805 		cqe_size = OCRDMA_DPP_CQE_SIZE;
1806 		hw_pages = 1;
1807 	} else {
1808 		cq->max_hw_cqe = dev->attr.max_cqe;
1809 		max_hw_cqe = dev->attr.max_cqe;
1810 		cqe_size = sizeof(struct ocrdma_cqe);
1811 		hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;
1812 	}
1813 
1814 	cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);
1815 
1816 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
1817 	if (!cmd)
1818 		return -ENOMEM;
1819 	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
1820 			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1821 	cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
1822 	if (!cq->va) {
1823 		status = -ENOMEM;
1824 		goto mem_err;
1825 	}
1826 	page_size = cq->len / hw_pages;
1827 	cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
1828 					OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
1829 	cmd->cmd.pgsz_pgcnt |= hw_pages;
1830 	cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
1831 
1832 	cq->eqn = ocrdma_bind_eq(dev);
1833 	cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
1834 	cqe_count = cq->len / cqe_size;
1835 	cq->cqe_cnt = cqe_count;
1836 	if (cqe_count > 1024) {
1837 		/* Set cnt to 3 to indicate more than 1024 cq entries */
1838 		cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
1839 	} else {
1840 		u8 count = 0;
1841 		switch (cqe_count) {
1842 		case 256:
1843 			count = 0;
1844 			break;
1845 		case 512:
1846 			count = 1;
1847 			break;
1848 		case 1024:
1849 			count = 2;
1850 			break;
1851 		default:
1852 			goto mbx_err;
1853 		}
1854 		cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
1855 	}
1856 	/* shared eq between all the consumer cqs. */
1857 	cmd->cmd.eqn = cq->eqn;
1858 	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1859 		if (dpp_cq)
1860 			cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
1861 				OCRDMA_CREATE_CQ_TYPE_SHIFT;
1862 		cq->phase_change = false;
1863 		cmd->cmd.pdid_cqecnt = (cq->len / cqe_size);
1864 	} else {
1865 		cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1;
1866 		cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
1867 		cq->phase_change = true;
1868 	}
1869 
1870 	/* pd_id valid only for v3 */
1871 	cmd->cmd.pdid_cqecnt |= (pd_id <<
1872 		OCRDMA_CREATE_CQ_CMD_PDID_SHIFT);
1873 	ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
1874 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1875 	if (status)
1876 		goto mbx_err;
1877 
1878 	rsp = (struct ocrdma_create_cq_rsp *)cmd;
1879 	cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
1880 	kfree(cmd);
1881 	return 0;
1882 mbx_err:
1883 	ocrdma_unbind_eq(dev, cq->eqn);
1884 	dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
1885 mem_err:
1886 	kfree(cmd);
1887 	return status;
1888 }
1889 
1890 int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
1891 {
1892 	int status = -ENOMEM;
1893 	struct ocrdma_destroy_cq *cmd;
1894 
1895 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
1896 	if (!cmd)
1897 		return status;
1898 	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
1899 			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1900 
1901 	cmd->bypass_flush_qid |=
1902 	    (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
1903 	    OCRDMA_DESTROY_CQ_QID_MASK;
1904 
1905 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1906 	ocrdma_unbind_eq(dev, cq->eqn);
1907 	dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
1908 	kfree(cmd);
1909 	return status;
1910 }
1911 
1912 int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
1913 			  u32 pdid, int addr_check)
1914 {
1915 	int status = -ENOMEM;
1916 	struct ocrdma_alloc_lkey *cmd;
1917 	struct ocrdma_alloc_lkey_rsp *rsp;
1918 
1919 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));
1920 	if (!cmd)
1921 		return status;
1922 	cmd->pdid = pdid;
1923 	cmd->pbl_sz_flags |= addr_check;
1924 	cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
1925 	cmd->pbl_sz_flags |=
1926 	    (hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT);
1927 	cmd->pbl_sz_flags |=
1928 	    (hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT);
1929 	cmd->pbl_sz_flags |=
1930 	    (hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT);
1931 	cmd->pbl_sz_flags |=
1932 	    (hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT);
1933 	cmd->pbl_sz_flags |=
1934 	    (hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT);
1935 
1936 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1937 	if (status)
1938 		goto mbx_err;
1939 	rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
1940 	hwmr->lkey = rsp->lrkey;
1941 mbx_err:
1942 	kfree(cmd);
1943 	return status;
1944 }
1945 
1946 int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
1947 {
1948 	int status;
1949 	struct ocrdma_dealloc_lkey *cmd;
1950 
1951 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
1952 	if (!cmd)
1953 		return -ENOMEM;
1954 	cmd->lkey = lkey;
1955 	cmd->rsvd_frmr = fr_mr ? 1 : 0;
1956 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1957 
1958 	kfree(cmd);
1959 	return status;
1960 }
1961 
1962 static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
1963 			     u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
1964 {
1965 	int status = -ENOMEM;
1966 	int i;
1967 	struct ocrdma_reg_nsmr *cmd;
1968 	struct ocrdma_reg_nsmr_rsp *rsp;
1969 
1970 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
1971 	if (!cmd)
1972 		return -ENOMEM;
1973 	cmd->num_pbl_pdid =
1974 	    pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);
1975 	cmd->fr_mr = hwmr->fr_mr;
1976 
1977 	cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
1978 				    OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
1979 	cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
1980 				    OCRDMA_REG_NSMR_REMOTE_RD_SHIFT);
1981 	cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
1982 				    OCRDMA_REG_NSMR_LOCAL_WR_SHIFT);
1983 	cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
1984 				    OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT);
1985 	cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
1986 				    OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT);
1987 	cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT);
1988 
1989 	cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE);
1990 	cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) <<
1991 					OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
1992 	cmd->totlen_low = hwmr->len;
1993 	cmd->totlen_high = upper_32_bits(hwmr->len);
1994 	cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff);
1995 	cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo);
1996 	cmd->va_loaddr = (u32) hwmr->va;
1997 	cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);
1998 
1999 	for (i = 0; i < pbl_cnt; i++) {
2000 		cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
2001 		cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
2002 	}
2003 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2004 	if (status)
2005 		goto mbx_err;
2006 	rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
2007 	hwmr->lkey = rsp->lrkey;
2008 mbx_err:
2009 	kfree(cmd);
2010 	return status;
2011 }
2012 
2013 static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
2014 				  struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
2015 				  u32 pbl_offset, u32 last)
2016 {
2017 	int status;
2018 	int i;
2019 	struct ocrdma_reg_nsmr_cont *cmd;
2020 
2021 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
2022 	if (!cmd)
2023 		return -ENOMEM;
2024 	cmd->lrkey = hwmr->lkey;
2025 	cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) |
2026 	    (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
2027 	cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;
2028 
2029 	for (i = 0; i < pbl_cnt; i++) {
2030 		cmd->pbl[i].lo =
2031 		    (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
2032 		cmd->pbl[i].hi =
2033 		    upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
2034 	}
2035 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2036 
2037 	kfree(cmd);
2038 	return status;
2039 }
2040 
2041 int ocrdma_reg_mr(struct ocrdma_dev *dev,
2042 		  struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
2043 {
2044 	int status;
2045 	u32 last = 0;
2046 	u32 cur_pbl_cnt, pbl_offset;
2047 	u32 pending_pbl_cnt = hwmr->num_pbls;
2048 
2049 	pbl_offset = 0;
2050 	cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
2051 	if (cur_pbl_cnt == pending_pbl_cnt)
2052 		last = 1;
2053 
2054 	status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
2055 				   cur_pbl_cnt, hwmr->pbe_size, last);
2056 	if (status) {
2057 		pr_err("%s() status=%d\n", __func__, status);
2058 		return status;
2059 	}
2060 	/* if there is no more pbls to register then exit. */
2061 	if (last)
2062 		return 0;
2063 
2064 	while (!last) {
2065 		pbl_offset += cur_pbl_cnt;
2066 		pending_pbl_cnt -= cur_pbl_cnt;
2067 		cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
2068 		/* if we reach the end of the pbls, then need to set the last
2069 		 * bit, indicating no more pbls to register for this memory key.
2070 		 */
2071 		if (cur_pbl_cnt == pending_pbl_cnt)
2072 			last = 1;
2073 
2074 		status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
2075 						pbl_offset, last);
2076 		if (status)
2077 			break;
2078 	}
2079 	if (status)
2080 		pr_err("%s() err. status=%d\n", __func__, status);
2081 
2082 	return status;
2083 }
2084 
2085 bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
2086 {
2087 	struct ocrdma_qp *tmp;
2088 	bool found = false;
2089 	list_for_each_entry(tmp, &cq->sq_head, sq_entry) {
2090 		if (qp == tmp) {
2091 			found = true;
2092 			break;
2093 		}
2094 	}
2095 	return found;
2096 }
2097 
2098 bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
2099 {
2100 	struct ocrdma_qp *tmp;
2101 	bool found = false;
2102 	list_for_each_entry(tmp, &cq->rq_head, rq_entry) {
2103 		if (qp == tmp) {
2104 			found = true;
2105 			break;
2106 		}
2107 	}
2108 	return found;
2109 }
2110 
2111 void ocrdma_flush_qp(struct ocrdma_qp *qp)
2112 {
2113 	bool found;
2114 	unsigned long flags;
2115 	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2116 
2117 	spin_lock_irqsave(&dev->flush_q_lock, flags);
2118 	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
2119 	if (!found)
2120 		list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
2121 	if (!qp->srq) {
2122 		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
2123 		if (!found)
2124 			list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
2125 	}
2126 	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
2127 }
2128 
2129 static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
2130 {
2131 	qp->sq.head = 0;
2132 	qp->sq.tail = 0;
2133 	qp->rq.head = 0;
2134 	qp->rq.tail = 0;
2135 }
2136 
2137 int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
2138 			   enum ib_qp_state *old_ib_state)
2139 {
2140 	unsigned long flags;
2141 	enum ocrdma_qp_state new_state;
2142 	new_state = get_ocrdma_qp_state(new_ib_state);
2143 
2144 	/* sync with wqe and rqe posting */
2145 	spin_lock_irqsave(&qp->q_lock, flags);
2146 
2147 	if (old_ib_state)
2148 		*old_ib_state = get_ibqp_state(qp->state);
2149 	if (new_state == qp->state) {
2150 		spin_unlock_irqrestore(&qp->q_lock, flags);
2151 		return 1;
2152 	}
2153 
2154 
2155 	if (new_state == OCRDMA_QPS_INIT) {
2156 		ocrdma_init_hwq_ptr(qp);
2157 		ocrdma_del_flush_qp(qp);
2158 	} else if (new_state == OCRDMA_QPS_ERR) {
2159 		ocrdma_flush_qp(qp);
2160 	}
2161 
2162 	qp->state = new_state;
2163 
2164 	spin_unlock_irqrestore(&qp->q_lock, flags);
2165 	return 0;
2166 }
2167 
2168 static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
2169 {
2170 	u32 flags = 0;
2171 	if (qp->cap_flags & OCRDMA_QP_INB_RD)
2172 		flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK;
2173 	if (qp->cap_flags & OCRDMA_QP_INB_WR)
2174 		flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK;
2175 	if (qp->cap_flags & OCRDMA_QP_MW_BIND)
2176 		flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK;
2177 	if (qp->cap_flags & OCRDMA_QP_LKEY0)
2178 		flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK;
2179 	if (qp->cap_flags & OCRDMA_QP_FAST_REG)
2180 		flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK;
2181 	return flags;
2182 }
2183 
2184 static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
2185 					struct ib_qp_init_attr *attrs,
2186 					struct ocrdma_qp *qp)
2187 {
2188 	int status;
2189 	u32 len, hw_pages, hw_page_size;
2190 	dma_addr_t pa;
2191 	struct ocrdma_pd *pd = qp->pd;
2192 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2193 	struct pci_dev *pdev = dev->nic_info.pdev;
2194 	u32 max_wqe_allocated;
2195 	u32 max_sges = attrs->cap.max_send_sge;
2196 
2197 	/* QP1 may exceed 127 */
2198 	max_wqe_allocated = min_t(u32, attrs->cap.max_send_wr + 1,
2199 				dev->attr.max_wqe);
2200 
2201 	status = ocrdma_build_q_conf(&max_wqe_allocated,
2202 		dev->attr.wqe_size, &hw_pages, &hw_page_size);
2203 	if (status) {
2204 		pr_err("%s() req. max_send_wr=0x%x\n", __func__,
2205 		       max_wqe_allocated);
2206 		return -EINVAL;
2207 	}
2208 	qp->sq.max_cnt = max_wqe_allocated;
2209 	len = (hw_pages * hw_page_size);
2210 
2211 	qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2212 	if (!qp->sq.va)
2213 		return -EINVAL;
2214 	qp->sq.len = len;
2215 	qp->sq.pa = pa;
2216 	qp->sq.entry_size = dev->attr.wqe_size;
2217 	ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);
2218 
2219 	cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
2220 				<< OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT);
2221 	cmd->num_wq_rq_pages |= (hw_pages <<
2222 				 OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) &
2223 	    OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK;
2224 	cmd->max_sge_send_write |= (max_sges <<
2225 				    OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) &
2226 	    OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK;
2227 	cmd->max_sge_send_write |= (max_sges <<
2228 				    OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) &
2229 					OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK;
2230 	cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) <<
2231 			     OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) &
2232 				OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK;
2233 	cmd->wqe_rqe_size |= (dev->attr.wqe_size <<
2234 			      OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) &
2235 				OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK;
2236 	return 0;
2237 }
2238 
2239 static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
2240 					struct ib_qp_init_attr *attrs,
2241 					struct ocrdma_qp *qp)
2242 {
2243 	int status;
2244 	u32 len, hw_pages, hw_page_size;
2245 	dma_addr_t pa = 0;
2246 	struct ocrdma_pd *pd = qp->pd;
2247 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2248 	struct pci_dev *pdev = dev->nic_info.pdev;
2249 	u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
2250 
2251 	status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
2252 				     &hw_pages, &hw_page_size);
2253 	if (status) {
2254 		pr_err("%s() req. max_recv_wr=0x%x\n", __func__,
2255 		       attrs->cap.max_recv_wr + 1);
2256 		return status;
2257 	}
2258 	qp->rq.max_cnt = max_rqe_allocated;
2259 	len = (hw_pages * hw_page_size);
2260 
2261 	qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2262 	if (!qp->rq.va)
2263 		return -ENOMEM;
2264 	qp->rq.pa = pa;
2265 	qp->rq.len = len;
2266 	qp->rq.entry_size = dev->attr.rqe_size;
2267 
2268 	ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
2269 	cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
2270 		OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT);
2271 	cmd->num_wq_rq_pages |=
2272 	    (hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) &
2273 	    OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK;
2274 	cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge <<
2275 				OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) &
2276 				OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK;
2277 	cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
2278 				OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) &
2279 				OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK;
2280 	cmd->wqe_rqe_size |= (dev->attr.rqe_size <<
2281 			OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) &
2282 			OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK;
2283 	return 0;
2284 }
2285 
2286 static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
2287 					 struct ocrdma_pd *pd,
2288 					 struct ocrdma_qp *qp,
2289 					 u8 enable_dpp_cq, u16 dpp_cq_id)
2290 {
2291 	pd->num_dpp_qp--;
2292 	qp->dpp_enabled = true;
2293 	cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
2294 	if (!enable_dpp_cq)
2295 		return;
2296 	cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
2297 	cmd->dpp_credits_cqid = dpp_cq_id;
2298 	cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT <<
2299 					OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT;
2300 }
2301 
2302 static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
2303 					struct ocrdma_qp *qp)
2304 {
2305 	struct ocrdma_pd *pd = qp->pd;
2306 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2307 	struct pci_dev *pdev = dev->nic_info.pdev;
2308 	dma_addr_t pa = 0;
2309 	int ird_page_size = dev->attr.ird_page_size;
2310 	int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
2311 	struct ocrdma_hdr_wqe *rqe;
2312 	int i  = 0;
2313 
2314 	if (dev->attr.ird == 0)
2315 		return 0;
2316 
2317 	qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa,
2318 					   GFP_KERNEL);
2319 	if (!qp->ird_q_va)
2320 		return -ENOMEM;
2321 	ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
2322 			     pa, ird_page_size);
2323 	for (; i < ird_q_len / dev->attr.rqe_size; i++) {
2324 		rqe = (struct ocrdma_hdr_wqe *)(qp->ird_q_va +
2325 			(i * dev->attr.rqe_size));
2326 		rqe->cw = 0;
2327 		rqe->cw |= 2;
2328 		rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2329 		rqe->cw |= (8 << OCRDMA_WQE_SIZE_SHIFT);
2330 		rqe->cw |= (8 << OCRDMA_WQE_NXT_WQE_SIZE_SHIFT);
2331 	}
2332 	return 0;
2333 }
2334 
2335 static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
2336 				     struct ocrdma_qp *qp,
2337 				     struct ib_qp_init_attr *attrs,
2338 				     u16 *dpp_offset, u16 *dpp_credit_lmt)
2339 {
2340 	u32 max_wqe_allocated, max_rqe_allocated;
2341 	qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK;
2342 	qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
2343 	qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;
2344 	qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK;
2345 	qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT);
2346 	qp->dpp_enabled = false;
2347 	if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) {
2348 		qp->dpp_enabled = true;
2349 		*dpp_credit_lmt = (rsp->dpp_response &
2350 				OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >>
2351 				OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT;
2352 		*dpp_offset = (rsp->dpp_response &
2353 				OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>
2354 				OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;
2355 	}
2356 	max_wqe_allocated =
2357 		rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT;
2358 	max_wqe_allocated = 1 << max_wqe_allocated;
2359 	max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
2360 
2361 	qp->sq.max_cnt = max_wqe_allocated;
2362 	qp->sq.max_wqe_idx = max_wqe_allocated - 1;
2363 
2364 	if (!attrs->srq) {
2365 		qp->rq.max_cnt = max_rqe_allocated;
2366 		qp->rq.max_wqe_idx = max_rqe_allocated - 1;
2367 	}
2368 }
2369 
2370 int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
2371 			 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
2372 			 u16 *dpp_credit_lmt)
2373 {
2374 	int status = -ENOMEM;
2375 	u32 flags = 0;
2376 	struct ocrdma_pd *pd = qp->pd;
2377 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2378 	struct pci_dev *pdev = dev->nic_info.pdev;
2379 	struct ocrdma_cq *cq;
2380 	struct ocrdma_create_qp_req *cmd;
2381 	struct ocrdma_create_qp_rsp *rsp;
2382 	int qptype;
2383 
2384 	switch (attrs->qp_type) {
2385 	case IB_QPT_GSI:
2386 		qptype = OCRDMA_QPT_GSI;
2387 		break;
2388 	case IB_QPT_RC:
2389 		qptype = OCRDMA_QPT_RC;
2390 		break;
2391 	case IB_QPT_UD:
2392 		qptype = OCRDMA_QPT_UD;
2393 		break;
2394 	default:
2395 		return -EINVAL;
2396 	}
2397 
2398 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
2399 	if (!cmd)
2400 		return status;
2401 	cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &
2402 						OCRDMA_CREATE_QP_REQ_QPT_MASK;
2403 	status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
2404 	if (status)
2405 		goto sq_err;
2406 
2407 	if (attrs->srq) {
2408 		struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);
2409 		cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK;
2410 		cmd->rq_addr[0].lo = srq->id;
2411 		qp->srq = srq;
2412 	} else {
2413 		status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
2414 		if (status)
2415 			goto rq_err;
2416 	}
2417 
2418 	status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
2419 	if (status)
2420 		goto mbx_err;
2421 
2422 	cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) &
2423 				OCRDMA_CREATE_QP_REQ_PD_ID_MASK;
2424 
2425 	flags = ocrdma_set_create_qp_mbx_access_flags(qp);
2426 
2427 	cmd->max_sge_recv_flags |= flags;
2428 	cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
2429 			     OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) &
2430 				OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK;
2431 	cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
2432 			     OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) &
2433 				OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK;
2434 	cq = get_ocrdma_cq(attrs->send_cq);
2435 	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) &
2436 				OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK;
2437 	qp->sq_cq = cq;
2438 	cq = get_ocrdma_cq(attrs->recv_cq);
2439 	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) &
2440 				OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
2441 	qp->rq_cq = cq;
2442 
2443 	if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
2444 	    (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
2445 		ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
2446 					     dpp_cq_id);
2447 	}
2448 
2449 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2450 	if (status)
2451 		goto mbx_err;
2452 	rsp = (struct ocrdma_create_qp_rsp *)cmd;
2453 	ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
2454 	qp->state = OCRDMA_QPS_RST;
2455 	kfree(cmd);
2456 	return 0;
2457 mbx_err:
2458 	if (qp->rq.va)
2459 		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
2460 rq_err:
2461 	pr_err("%s(%d) rq_err\n", __func__, dev->id);
2462 	dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
2463 sq_err:
2464 	pr_err("%s(%d) sq_err\n", __func__, dev->id);
2465 	kfree(cmd);
2466 	return status;
2467 }
2468 
2469 int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
2470 			struct ocrdma_qp_params *param)
2471 {
2472 	int status = -ENOMEM;
2473 	struct ocrdma_query_qp *cmd;
2474 	struct ocrdma_query_qp_rsp *rsp;
2475 
2476 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*rsp));
2477 	if (!cmd)
2478 		return status;
2479 	cmd->qp_id = qp->id;
2480 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2481 	if (status)
2482 		goto mbx_err;
2483 	rsp = (struct ocrdma_query_qp_rsp *)cmd;
2484 	memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params));
2485 mbx_err:
2486 	kfree(cmd);
2487 	return status;
2488 }
2489 
2490 static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2491 				struct ocrdma_modify_qp *cmd,
2492 				struct ib_qp_attr *attrs,
2493 				int attr_mask)
2494 {
2495 	int status;
2496 	struct rdma_ah_attr *ah_attr = &attrs->ah_attr;
2497 	union ib_gid sgid;
2498 	struct ib_gid_attr sgid_attr;
2499 	u32 vlan_id = 0xFFFF;
2500 	u8 mac_addr[6], hdr_type;
2501 	union {
2502 		struct sockaddr     _sockaddr;
2503 		struct sockaddr_in  _sockaddr_in;
2504 		struct sockaddr_in6 _sockaddr_in6;
2505 	} sgid_addr, dgid_addr;
2506 	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2507 	const struct ib_global_route *grh;
2508 
2509 	if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) == 0)
2510 		return -EINVAL;
2511 	grh = rdma_ah_read_grh(ah_attr);
2512 	if (atomic_cmpxchg(&dev->update_sl, 1, 0))
2513 		ocrdma_init_service_level(dev);
2514 	cmd->params.tclass_sq_psn |=
2515 	    (grh->traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
2516 	cmd->params.rnt_rc_sl_fl |=
2517 	    (grh->flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
2518 	cmd->params.rnt_rc_sl_fl |= (rdma_ah_get_sl(ah_attr) <<
2519 				     OCRDMA_QP_PARAMS_SL_SHIFT);
2520 	cmd->params.hop_lmt_rq_psn |=
2521 	    (grh->hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
2522 	cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
2523 
2524 	/* GIDs */
2525 	memcpy(&cmd->params.dgid[0], &grh->dgid.raw[0],
2526 	       sizeof(cmd->params.dgid));
2527 
2528 	status = ib_get_cached_gid(&dev->ibdev, 1, grh->sgid_index,
2529 				   &sgid, &sgid_attr);
2530 	if (!status) {
2531 		vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
2532 		memcpy(mac_addr, sgid_attr.ndev->dev_addr, ETH_ALEN);
2533 		dev_put(sgid_attr.ndev);
2534 	}
2535 
2536 	qp->sgid_idx = grh->sgid_index;
2537 	memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
2538 	status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
2539 	if (status)
2540 		return status;
2541 	cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
2542 				(mac_addr[2] << 16) | (mac_addr[3] << 24);
2543 
2544 	hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
2545 	if (hdr_type == RDMA_NETWORK_IPV4) {
2546 		rdma_gid2ip(&sgid_addr._sockaddr, &sgid);
2547 		rdma_gid2ip(&dgid_addr._sockaddr, &grh->dgid);
2548 		memcpy(&cmd->params.dgid[0],
2549 		       &dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
2550 		memcpy(&cmd->params.sgid[0],
2551 		       &sgid_addr._sockaddr_in.sin_addr.s_addr, 4);
2552 	}
2553 	/* convert them to LE format. */
2554 	ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
2555 	ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
2556 	cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
2557 
2558 	if (vlan_id == 0xFFFF)
2559 		vlan_id = 0;
2560 	if (vlan_id || dev->pfc_state) {
2561 		if (!vlan_id) {
2562 			pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
2563 			       dev->id);
2564 			pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
2565 			       dev->id);
2566 		}
2567 		cmd->params.vlan_dmac_b4_to_b5 |=
2568 		    vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
2569 		cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
2570 		cmd->params.rnt_rc_sl_fl |=
2571 			(dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
2572 	}
2573 	cmd->params.max_sge_recv_flags |= ((hdr_type <<
2574 					OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT) &
2575 					OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK);
2576 	return 0;
2577 }
2578 
2579 static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2580 				struct ocrdma_modify_qp *cmd,
2581 				struct ib_qp_attr *attrs, int attr_mask)
2582 {
2583 	int status = 0;
2584 	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2585 
2586 	if (attr_mask & IB_QP_PKEY_INDEX) {
2587 		cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
2588 					    OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
2589 		cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
2590 	}
2591 	if (attr_mask & IB_QP_QKEY) {
2592 		qp->qkey = attrs->qkey;
2593 		cmd->params.qkey = attrs->qkey;
2594 		cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
2595 	}
2596 	if (attr_mask & IB_QP_AV) {
2597 		status = ocrdma_set_av_params(qp, cmd, attrs, attr_mask);
2598 		if (status)
2599 			return status;
2600 	} else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
2601 		/* set the default mac address for UD, GSI QPs */
2602 		cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
2603 			(dev->nic_info.mac_addr[1] << 8) |
2604 			(dev->nic_info.mac_addr[2] << 16) |
2605 			(dev->nic_info.mac_addr[3] << 24);
2606 		cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
2607 					(dev->nic_info.mac_addr[5] << 8);
2608 	}
2609 	if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
2610 	    attrs->en_sqd_async_notify) {
2611 		cmd->params.max_sge_recv_flags |=
2612 			OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
2613 		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2614 	}
2615 	if (attr_mask & IB_QP_DEST_QPN) {
2616 		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
2617 				OCRDMA_QP_PARAMS_DEST_QPN_MASK);
2618 		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2619 	}
2620 	if (attr_mask & IB_QP_PATH_MTU) {
2621 		if (attrs->path_mtu < IB_MTU_512 ||
2622 		    attrs->path_mtu > IB_MTU_4096) {
2623 			pr_err("ocrdma%d: IB MTU %d is not supported\n",
2624 			       dev->id, ib_mtu_enum_to_int(attrs->path_mtu));
2625 			status = -EINVAL;
2626 			goto pmtu_err;
2627 		}
2628 		cmd->params.path_mtu_pkey_indx |=
2629 		    (ib_mtu_enum_to_int(attrs->path_mtu) <<
2630 		     OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
2631 		    OCRDMA_QP_PARAMS_PATH_MTU_MASK;
2632 		cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
2633 	}
2634 	if (attr_mask & IB_QP_TIMEOUT) {
2635 		cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
2636 		    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
2637 		cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
2638 	}
2639 	if (attr_mask & IB_QP_RETRY_CNT) {
2640 		cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
2641 				      OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
2642 		    OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
2643 		cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
2644 	}
2645 	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2646 		cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
2647 				      OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
2648 		    OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
2649 		cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
2650 	}
2651 	if (attr_mask & IB_QP_RNR_RETRY) {
2652 		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
2653 			OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT)
2654 			& OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
2655 		cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
2656 	}
2657 	if (attr_mask & IB_QP_SQ_PSN) {
2658 		cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
2659 		cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
2660 	}
2661 	if (attr_mask & IB_QP_RQ_PSN) {
2662 		cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
2663 		cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
2664 	}
2665 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2666 		if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
2667 			status = -EINVAL;
2668 			goto pmtu_err;
2669 		}
2670 		qp->max_ord = attrs->max_rd_atomic;
2671 		cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
2672 	}
2673 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2674 		if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
2675 			status = -EINVAL;
2676 			goto pmtu_err;
2677 		}
2678 		qp->max_ird = attrs->max_dest_rd_atomic;
2679 		cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID;
2680 	}
2681 	cmd->params.max_ord_ird = (qp->max_ord <<
2682 				OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) |
2683 				(qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK);
2684 pmtu_err:
2685 	return status;
2686 }
2687 
2688 int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
2689 			 struct ib_qp_attr *attrs, int attr_mask)
2690 {
2691 	int status = -ENOMEM;
2692 	struct ocrdma_modify_qp *cmd;
2693 
2694 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd));
2695 	if (!cmd)
2696 		return status;
2697 
2698 	cmd->params.id = qp->id;
2699 	cmd->flags = 0;
2700 	if (attr_mask & IB_QP_STATE) {
2701 		cmd->params.max_sge_recv_flags |=
2702 		    (get_ocrdma_qp_state(attrs->qp_state) <<
2703 		     OCRDMA_QP_PARAMS_STATE_SHIFT) &
2704 		    OCRDMA_QP_PARAMS_STATE_MASK;
2705 		cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
2706 	} else {
2707 		cmd->params.max_sge_recv_flags |=
2708 		    (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
2709 		    OCRDMA_QP_PARAMS_STATE_MASK;
2710 	}
2711 
2712 	status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask);
2713 	if (status)
2714 		goto mbx_err;
2715 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2716 	if (status)
2717 		goto mbx_err;
2718 
2719 mbx_err:
2720 	kfree(cmd);
2721 	return status;
2722 }
2723 
2724 int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
2725 {
2726 	int status = -ENOMEM;
2727 	struct ocrdma_destroy_qp *cmd;
2728 	struct pci_dev *pdev = dev->nic_info.pdev;
2729 
2730 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd));
2731 	if (!cmd)
2732 		return status;
2733 	cmd->qp_id = qp->id;
2734 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2735 	if (status)
2736 		goto mbx_err;
2737 
2738 mbx_err:
2739 	kfree(cmd);
2740 	if (qp->sq.va)
2741 		dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
2742 	if (!qp->srq && qp->rq.va)
2743 		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
2744 	if (qp->dpp_enabled)
2745 		qp->pd->num_dpp_qp++;
2746 	return status;
2747 }
2748 
2749 int ocrdma_mbx_create_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
2750 			  struct ib_srq_init_attr *srq_attr,
2751 			  struct ocrdma_pd *pd)
2752 {
2753 	int status = -ENOMEM;
2754 	int hw_pages, hw_page_size;
2755 	int len;
2756 	struct ocrdma_create_srq_rsp *rsp;
2757 	struct ocrdma_create_srq *cmd;
2758 	dma_addr_t pa;
2759 	struct pci_dev *pdev = dev->nic_info.pdev;
2760 	u32 max_rqe_allocated;
2761 
2762 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
2763 	if (!cmd)
2764 		return status;
2765 
2766 	cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK;
2767 	max_rqe_allocated = srq_attr->attr.max_wr + 1;
2768 	status = ocrdma_build_q_conf(&max_rqe_allocated,
2769 				dev->attr.rqe_size,
2770 				&hw_pages, &hw_page_size);
2771 	if (status) {
2772 		pr_err("%s() req. max_wr=0x%x\n", __func__,
2773 		       srq_attr->attr.max_wr);
2774 		status = -EINVAL;
2775 		goto ret;
2776 	}
2777 	len = hw_pages * hw_page_size;
2778 	srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2779 	if (!srq->rq.va) {
2780 		status = -ENOMEM;
2781 		goto ret;
2782 	}
2783 	ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
2784 
2785 	srq->rq.entry_size = dev->attr.rqe_size;
2786 	srq->rq.pa = pa;
2787 	srq->rq.len = len;
2788 	srq->rq.max_cnt = max_rqe_allocated;
2789 
2790 	cmd->max_sge_rqe = ilog2(max_rqe_allocated);
2791 	cmd->max_sge_rqe |= srq_attr->attr.max_sge <<
2792 				OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT;
2793 
2794 	cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
2795 		<< OCRDMA_CREATE_SRQ_PG_SZ_SHIFT);
2796 	cmd->pages_rqe_sz |= (dev->attr.rqe_size
2797 		<< OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT)
2798 		& OCRDMA_CREATE_SRQ_RQE_SIZE_MASK;
2799 	cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT;
2800 
2801 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2802 	if (status)
2803 		goto mbx_err;
2804 	rsp = (struct ocrdma_create_srq_rsp *)cmd;
2805 	srq->id = rsp->id;
2806 	srq->rq.dbid = rsp->id;
2807 	max_rqe_allocated = ((rsp->max_sge_rqe_allocated &
2808 		OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >>
2809 		OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT);
2810 	max_rqe_allocated = (1 << max_rqe_allocated);
2811 	srq->rq.max_cnt = max_rqe_allocated;
2812 	srq->rq.max_wqe_idx = max_rqe_allocated - 1;
2813 	srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
2814 		OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >>
2815 		OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT;
2816 	goto ret;
2817 mbx_err:
2818 	dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
2819 ret:
2820 	kfree(cmd);
2821 	return status;
2822 }
2823 
2824 int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
2825 {
2826 	int status = -ENOMEM;
2827 	struct ocrdma_modify_srq *cmd;
2828 	struct ocrdma_pd *pd = srq->pd;
2829 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
2830 
2831 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd));
2832 	if (!cmd)
2833 		return status;
2834 	cmd->id = srq->id;
2835 	cmd->limit_max_rqe |= srq_attr->srq_limit <<
2836 	    OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
2837 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2838 	kfree(cmd);
2839 	return status;
2840 }
2841 
2842 int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
2843 {
2844 	int status = -ENOMEM;
2845 	struct ocrdma_query_srq *cmd;
2846 	struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);
2847 
2848 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd));
2849 	if (!cmd)
2850 		return status;
2851 	cmd->id = srq->rq.dbid;
2852 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2853 	if (status == 0) {
2854 		struct ocrdma_query_srq_rsp *rsp =
2855 		    (struct ocrdma_query_srq_rsp *)cmd;
2856 		srq_attr->max_sge =
2857 		    rsp->srq_lmt_max_sge &
2858 		    OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK;
2859 		srq_attr->max_wr =
2860 		    rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT;
2861 		srq_attr->srq_limit = rsp->srq_lmt_max_sge >>
2862 		    OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT;
2863 	}
2864 	kfree(cmd);
2865 	return status;
2866 }
2867 
2868 int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
2869 {
2870 	int status = -ENOMEM;
2871 	struct ocrdma_destroy_srq *cmd;
2872 	struct pci_dev *pdev = dev->nic_info.pdev;
2873 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
2874 	if (!cmd)
2875 		return status;
2876 	cmd->id = srq->id;
2877 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2878 	if (srq->rq.va)
2879 		dma_free_coherent(&pdev->dev, srq->rq.len,
2880 				  srq->rq.va, srq->rq.pa);
2881 	kfree(cmd);
2882 	return status;
2883 }
2884 
2885 static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
2886 				      struct ocrdma_dcbx_cfg *dcbxcfg)
2887 {
2888 	int status;
2889 	dma_addr_t pa;
2890 	struct ocrdma_mqe cmd;
2891 
2892 	struct ocrdma_get_dcbx_cfg_req *req = NULL;
2893 	struct ocrdma_get_dcbx_cfg_rsp *rsp = NULL;
2894 	struct pci_dev *pdev = dev->nic_info.pdev;
2895 	struct ocrdma_mqe_sge *mqe_sge = cmd.u.nonemb_req.sge;
2896 
2897 	memset(&cmd, 0, sizeof(struct ocrdma_mqe));
2898 	cmd.hdr.pyld_len = max_t (u32, sizeof(struct ocrdma_get_dcbx_cfg_rsp),
2899 					sizeof(struct ocrdma_get_dcbx_cfg_req));
2900 	req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL);
2901 	if (!req) {
2902 		status = -ENOMEM;
2903 		goto mem_err;
2904 	}
2905 
2906 	cmd.hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
2907 					OCRDMA_MQE_HDR_SGE_CNT_MASK;
2908 	mqe_sge->pa_lo = (u32) (pa & 0xFFFFFFFFUL);
2909 	mqe_sge->pa_hi = (u32) upper_32_bits(pa);
2910 	mqe_sge->len = cmd.hdr.pyld_len;
2911 
2912 	memset(req, 0, sizeof(struct ocrdma_get_dcbx_cfg_req));
2913 	ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG,
2914 			OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len);
2915 	req->param_type = ptype;
2916 
2917 	status = ocrdma_mbx_cmd(dev, &cmd);
2918 	if (status)
2919 		goto mbx_err;
2920 
2921 	rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req;
2922 	ocrdma_le32_to_cpu(rsp, sizeof(struct ocrdma_get_dcbx_cfg_rsp));
2923 	memcpy(dcbxcfg, &rsp->cfg, sizeof(struct ocrdma_dcbx_cfg));
2924 
2925 mbx_err:
2926 	dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa);
2927 mem_err:
2928 	return status;
2929 }
2930 
2931 #define OCRDMA_MAX_SERVICE_LEVEL_INDEX	0x08
2932 #define OCRDMA_DEFAULT_SERVICE_LEVEL	0x05
2933 
2934 static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype,
2935 				    struct ocrdma_dcbx_cfg *dcbxcfg,
2936 				    u8 *srvc_lvl)
2937 {
2938 	int status = -EINVAL, indx, slindx;
2939 	int ventry_cnt;
2940 	struct ocrdma_app_parameter *app_param;
2941 	u8 valid, proto_sel;
2942 	u8 app_prio, pfc_prio;
2943 	u16 proto;
2944 
2945 	if (!(dcbxcfg->tcv_aev_opv_st & OCRDMA_DCBX_STATE_MASK)) {
2946 		pr_info("%s ocrdma%d DCBX is disabled\n",
2947 			dev_name(&dev->nic_info.pdev->dev), dev->id);
2948 		goto out;
2949 	}
2950 
2951 	if (!ocrdma_is_enabled_and_synced(dcbxcfg->pfc_state)) {
2952 		pr_info("%s ocrdma%d priority flow control(%s) is %s%s\n",
2953 			dev_name(&dev->nic_info.pdev->dev), dev->id,
2954 			(ptype > 0 ? "operational" : "admin"),
2955 			(dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_ENABLED) ?
2956 			"enabled" : "disabled",
2957 			(dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_SYNC) ?
2958 			"" : ", not sync'ed");
2959 		goto out;
2960 	} else {
2961 		pr_info("%s ocrdma%d priority flow control is enabled and sync'ed\n",
2962 			dev_name(&dev->nic_info.pdev->dev), dev->id);
2963 	}
2964 
2965 	ventry_cnt = (dcbxcfg->tcv_aev_opv_st >>
2966 				OCRDMA_DCBX_APP_ENTRY_SHIFT)
2967 				& OCRDMA_DCBX_STATE_MASK;
2968 
2969 	for (indx = 0; indx < ventry_cnt; indx++) {
2970 		app_param = &dcbxcfg->app_param[indx];
2971 		valid = (app_param->valid_proto_app >>
2972 				OCRDMA_APP_PARAM_VALID_SHIFT)
2973 				& OCRDMA_APP_PARAM_VALID_MASK;
2974 		proto_sel = (app_param->valid_proto_app
2975 				>>  OCRDMA_APP_PARAM_PROTO_SEL_SHIFT)
2976 				& OCRDMA_APP_PARAM_PROTO_SEL_MASK;
2977 		proto = app_param->valid_proto_app &
2978 				OCRDMA_APP_PARAM_APP_PROTO_MASK;
2979 
2980 		if (
2981 			valid && proto == ETH_P_IBOE &&
2982 			proto_sel == OCRDMA_PROTO_SELECT_L2) {
2983 			for (slindx = 0; slindx <
2984 				OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) {
2985 				app_prio = ocrdma_get_app_prio(
2986 						(u8 *)app_param->app_prio,
2987 						slindx);
2988 				pfc_prio = ocrdma_get_pfc_prio(
2989 						(u8 *)dcbxcfg->pfc_prio,
2990 						slindx);
2991 
2992 				if (app_prio && pfc_prio) {
2993 					*srvc_lvl = slindx;
2994 					status = 0;
2995 					goto out;
2996 				}
2997 			}
2998 			if (slindx == OCRDMA_MAX_SERVICE_LEVEL_INDEX) {
2999 				pr_info("%s ocrdma%d application priority not set for 0x%x protocol\n",
3000 					dev_name(&dev->nic_info.pdev->dev),
3001 					dev->id, proto);
3002 			}
3003 		}
3004 	}
3005 
3006 out:
3007 	return status;
3008 }
3009 
3010 void ocrdma_init_service_level(struct ocrdma_dev *dev)
3011 {
3012 	int status = 0, indx;
3013 	struct ocrdma_dcbx_cfg dcbxcfg;
3014 	u8 srvc_lvl = OCRDMA_DEFAULT_SERVICE_LEVEL;
3015 	int ptype = OCRDMA_PARAMETER_TYPE_OPER;
3016 
3017 	for (indx = 0; indx < 2; indx++) {
3018 		status = ocrdma_mbx_get_dcbx_config(dev, ptype, &dcbxcfg);
3019 		if (status) {
3020 			pr_err("%s(): status=%d\n", __func__, status);
3021 			ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
3022 			continue;
3023 		}
3024 
3025 		status = ocrdma_parse_dcbxcfg_rsp(dev, ptype,
3026 						  &dcbxcfg, &srvc_lvl);
3027 		if (status) {
3028 			ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
3029 			continue;
3030 		}
3031 
3032 		break;
3033 	}
3034 
3035 	if (status)
3036 		pr_info("%s ocrdma%d service level default\n",
3037 			dev_name(&dev->nic_info.pdev->dev), dev->id);
3038 	else
3039 		pr_info("%s ocrdma%d service level %d\n",
3040 			dev_name(&dev->nic_info.pdev->dev), dev->id,
3041 			srvc_lvl);
3042 
3043 	dev->pfc_state = ocrdma_is_enabled_and_synced(dcbxcfg.pfc_state);
3044 	dev->sl = srvc_lvl;
3045 }
3046 
3047 int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
3048 {
3049 	int i;
3050 	int status = -EINVAL;
3051 	struct ocrdma_av *av;
3052 	unsigned long flags;
3053 
3054 	av = dev->av_tbl.va;
3055 	spin_lock_irqsave(&dev->av_tbl.lock, flags);
3056 	for (i = 0; i < dev->av_tbl.num_ah; i++) {
3057 		if (av->valid == 0) {
3058 			av->valid = OCRDMA_AV_VALID;
3059 			ah->av = av;
3060 			ah->id = i;
3061 			status = 0;
3062 			break;
3063 		}
3064 		av++;
3065 	}
3066 	if (i == dev->av_tbl.num_ah)
3067 		status = -EAGAIN;
3068 	spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
3069 	return status;
3070 }
3071 
3072 int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
3073 {
3074 	unsigned long flags;
3075 	spin_lock_irqsave(&dev->av_tbl.lock, flags);
3076 	ah->av->valid = 0;
3077 	spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
3078 	return 0;
3079 }
3080 
3081 static int ocrdma_create_eqs(struct ocrdma_dev *dev)
3082 {
3083 	int num_eq, i, status = 0;
3084 	int irq;
3085 	unsigned long flags = 0;
3086 
3087 	num_eq = dev->nic_info.msix.num_vectors -
3088 			dev->nic_info.msix.start_vector;
3089 	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
3090 		num_eq = 1;
3091 		flags = IRQF_SHARED;
3092 	} else {
3093 		num_eq = min_t(u32, num_eq, num_online_cpus());
3094 	}
3095 
3096 	if (!num_eq)
3097 		return -EINVAL;
3098 
3099 	dev->eq_tbl = kcalloc(num_eq, sizeof(struct ocrdma_eq), GFP_KERNEL);
3100 	if (!dev->eq_tbl)
3101 		return -ENOMEM;
3102 
3103 	for (i = 0; i < num_eq; i++) {
3104 		status = ocrdma_create_eq(dev, &dev->eq_tbl[i],
3105 					OCRDMA_EQ_LEN);
3106 		if (status) {
3107 			status = -EINVAL;
3108 			break;
3109 		}
3110 		sprintf(dev->eq_tbl[i].irq_name, "ocrdma%d-%d",
3111 			dev->id, i);
3112 		irq = ocrdma_get_irq(dev, &dev->eq_tbl[i]);
3113 		status = request_irq(irq, ocrdma_irq_handler, flags,
3114 				     dev->eq_tbl[i].irq_name,
3115 				     &dev->eq_tbl[i]);
3116 		if (status)
3117 			goto done;
3118 		dev->eq_cnt += 1;
3119 	}
3120 	/* one eq is sufficient for data path to work */
3121 	return 0;
3122 done:
3123 	ocrdma_destroy_eqs(dev);
3124 	return status;
3125 }
3126 
3127 static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
3128 				 int num)
3129 {
3130 	int i, status;
3131 	struct ocrdma_modify_eqd_req *cmd;
3132 
3133 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
3134 	if (!cmd)
3135 		return -ENOMEM;
3136 
3137 	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
3138 			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
3139 
3140 	cmd->cmd.num_eq = num;
3141 	for (i = 0; i < num; i++) {
3142 		cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
3143 		cmd->cmd.set_eqd[i].phase = 0;
3144 		cmd->cmd.set_eqd[i].delay_multiplier =
3145 				(eq[i].aic_obj.prev_eqd * 65)/100;
3146 	}
3147 	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
3148 
3149 	kfree(cmd);
3150 	return status;
3151 }
3152 
3153 static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
3154 			     int num)
3155 {
3156 	int num_eqs, i = 0;
3157 	if (num > 8) {
3158 		while (num) {
3159 			num_eqs = min(num, 8);
3160 			ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
3161 			i += num_eqs;
3162 			num -= num_eqs;
3163 		}
3164 	} else {
3165 		ocrdma_mbx_modify_eqd(dev, eq, num);
3166 	}
3167 	return 0;
3168 }
3169 
3170 void ocrdma_eqd_set_task(struct work_struct *work)
3171 {
3172 	struct ocrdma_dev *dev =
3173 		container_of(work, struct ocrdma_dev, eqd_work.work);
3174 	struct ocrdma_eq *eq = NULL;
3175 	int i, num = 0;
3176 	u64 eq_intr;
3177 
3178 	for (i = 0; i < dev->eq_cnt; i++) {
3179 		eq = &dev->eq_tbl[i];
3180 		if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
3181 			eq_intr = eq->aic_obj.eq_intr_cnt -
3182 				  eq->aic_obj.prev_eq_intr_cnt;
3183 			if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
3184 			    (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
3185 				eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
3186 				num++;
3187 			} else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
3188 				   (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
3189 				eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
3190 				num++;
3191 			}
3192 		}
3193 		eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
3194 	}
3195 
3196 	if (num)
3197 		ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
3198 	schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
3199 }
3200 
3201 int ocrdma_init_hw(struct ocrdma_dev *dev)
3202 {
3203 	int status;
3204 
3205 	/* create the eqs  */
3206 	status = ocrdma_create_eqs(dev);
3207 	if (status)
3208 		goto qpeq_err;
3209 	status = ocrdma_create_mq(dev);
3210 	if (status)
3211 		goto mq_err;
3212 	status = ocrdma_mbx_query_fw_config(dev);
3213 	if (status)
3214 		goto conf_err;
3215 	status = ocrdma_mbx_query_dev(dev);
3216 	if (status)
3217 		goto conf_err;
3218 	status = ocrdma_mbx_query_fw_ver(dev);
3219 	if (status)
3220 		goto conf_err;
3221 	status = ocrdma_mbx_create_ah_tbl(dev);
3222 	if (status)
3223 		goto conf_err;
3224 	status = ocrdma_mbx_get_phy_info(dev);
3225 	if (status)
3226 		goto info_attrb_err;
3227 	status = ocrdma_mbx_get_ctrl_attribs(dev);
3228 	if (status)
3229 		goto info_attrb_err;
3230 
3231 	return 0;
3232 
3233 info_attrb_err:
3234 	ocrdma_mbx_delete_ah_tbl(dev);
3235 conf_err:
3236 	ocrdma_destroy_mq(dev);
3237 mq_err:
3238 	ocrdma_destroy_eqs(dev);
3239 qpeq_err:
3240 	pr_err("%s() status=%d\n", __func__, status);
3241 	return status;
3242 }
3243 
3244 void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
3245 {
3246 	ocrdma_free_pd_pool(dev);
3247 	ocrdma_mbx_delete_ah_tbl(dev);
3248 
3249 	/* cleanup the control path */
3250 	ocrdma_destroy_mq(dev);
3251 
3252 	/* cleanup the eqs */
3253 	ocrdma_destroy_eqs(dev);
3254 }
3255