1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 
19 /* MSGQ module source file. */
20 
21 #include "bfi.h"
22 #include "bfa_msgq.h"
23 #include "bfa_ioc.h"
24 
/* Invoke and clear a command entry's completion callback.
 *
 * The callback pointer and argument are latched locally and NULLed on
 * the entry *before* the call, so the callback may safely repost or
 * free the entry.  Wrapped in do { } while (0) so the macro expands as
 * a single statement and stays safe in an unbraced if/else.
 */
#define call_cmdq_ent_cbfn(_cmdq_ent, _status)				\
do {									\
	bfa_msgq_cmdcbfn_t cbfn;					\
	void *cbarg;							\
	cbfn = (_cmdq_ent)->cbfn;					\
	cbarg = (_cmdq_ent)->cbarg;					\
	(_cmdq_ent)->cbfn = NULL;					\
	(_cmdq_ent)->cbarg = NULL;					\
	if (cbfn) {							\
		cbfn(cbarg, (_status));					\
	}								\
} while (0)
37 
38 static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
39 static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
40 
/* Events handled by the command queue (CMDQ) state machine. */
enum cmdq_event {
	CMDQ_E_START			= 1,	/* IOC enabled; bring queue up */
	CMDQ_E_STOP			= 2,	/* IOC disabled */
	CMDQ_E_FAIL			= 3,	/* IOC failure */
	CMDQ_E_POST			= 4,	/* command(s) copied into ring */
	CMDQ_E_INIT_RESP		= 5,	/* f/w acked the INIT request */
	CMDQ_E_DB_READY			= 6,	/* doorbell mailbox completed */
};
49 
/* CMDQ state machine states; bfa_fsm_state_decl() generates the
 * cmdq_sm_<state>_entry()/cmdq_sm_<state>() handler prototypes used below.
 */
bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
			enum cmdq_event);
55 
/* Entry action for the "stopped" state: reset all ring bookkeeping and
 * fail back every command still sitting on the pending queue, since the
 * firmware side of the MSGQ is no longer usable.
 */
static void
cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
{
	struct bfa_msgq_cmd_entry *cmdq_ent;

	cmdq->producer_index = 0;
	cmdq->consumer_index = 0;
	cmdq->flags = 0;
	cmdq->token = 0;
	cmdq->offset = 0;
	cmdq->bytes_to_copy = 0;
	/* Drain the pending list; complete each waiter with failure. */
	while (!list_empty(&cmdq->pending_q)) {
		bfa_q_deq(&cmdq->pending_q, &cmdq_ent);
		bfa_q_qe_init(&cmdq_ent->qe);
		call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
	}
}
73 
74 static void
75 cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
76 {
77 	switch (event) {
78 	case CMDQ_E_START:
79 		bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
80 		break;
81 
82 	case CMDQ_E_STOP:
83 	case CMDQ_E_FAIL:
84 		/* No-op */
85 		break;
86 
87 	case CMDQ_E_POST:
88 		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
89 		break;
90 
91 	default:
92 		bfa_sm_fault(event);
93 	}
94 }
95 
/* Entry action for "init_wait": drop this queue's count on the shared
 * init wait-counter; once both cmdq and rspq have checked in, the
 * counter's resume callback (bfa_msgq_init) sends the INIT request.
 */
static void
cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_wc_down(&cmdq->msgq->init_wc);
}
101 
102 static void
103 cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
104 {
105 	switch (event) {
106 	case CMDQ_E_STOP:
107 	case CMDQ_E_FAIL:
108 		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
109 		break;
110 
111 	case CMDQ_E_POST:
112 		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
113 		break;
114 
115 	case CMDQ_E_INIT_RESP:
116 		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
117 			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
118 			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
119 		} else
120 			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
121 		break;
122 
123 	default:
124 		bfa_sm_fault(event);
125 	}
126 }
127 
/* Entry action for "ready": no work; the queue idles here until the
 * next post or an IOC state change.
 */
static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}
132 
133 static void
134 cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
135 {
136 	switch (event) {
137 	case CMDQ_E_STOP:
138 	case CMDQ_E_FAIL:
139 		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
140 		break;
141 
142 	case CMDQ_E_POST:
143 		bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
144 		break;
145 
146 	default:
147 		bfa_sm_fault(event);
148 	}
149 }
150 
/* Entry action for "dbell_wait": push the current producer index to the
 * firmware through the mailbox doorbell.
 */
static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}
156 
157 static void
158 cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
159 {
160 	switch (event) {
161 	case CMDQ_E_STOP:
162 	case CMDQ_E_FAIL:
163 		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
164 		break;
165 
166 	case CMDQ_E_POST:
167 		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
168 		break;
169 
170 	case CMDQ_E_DB_READY:
171 		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
172 			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
173 			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
174 		} else
175 			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
176 		break;
177 
178 	default:
179 		bfa_sm_fault(event);
180 	}
181 }
182 
183 static void
184 bfa_msgq_cmdq_dbell_ready(void *arg)
185 {
186 	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
187 	bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
188 }
189 
/* Ring the command queue producer-index doorbell: build a
 * BFI_MSGQ_H2I_DOORBELL_PI mailbox message carrying the current PI and
 * hand it to the IOC mailbox.  If bfa_nw_ioc_mbox_queue() returns
 * false (message not queued for deferred delivery - presumably written
 * out immediately; confirm against bfa_ioc), the ready callback is
 * invoked inline so the FSM still sees CMDQ_E_DB_READY.
 */
static void
bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.cmdq_pi = htons(cmdq->producer_index);

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
				bfa_msgq_cmdq_dbell_ready, cmdq)) {
		bfa_msgq_cmdq_dbell_ready(cmdq);
	}
}
206 
207 static void
208 __cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
209 {
210 	size_t len = cmd->msg_size;
211 	int num_entries = 0;
212 	size_t to_copy;
213 	u8 *src, *dst;
214 
215 	src = (u8 *)cmd->msg_hdr;
216 	dst = (u8 *)cmdq->addr.kva;
217 	dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
218 
219 	while (len) {
220 		to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
221 				len : BFI_MSGQ_CMD_ENTRY_SIZE;
222 		memcpy(dst, src, to_copy);
223 		len -= to_copy;
224 		src += BFI_MSGQ_CMD_ENTRY_SIZE;
225 		BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
226 		dst = (u8 *)cmdq->addr.kva;
227 		dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
228 		num_entries++;
229 	}
230 
231 }
232 
/* Firmware advanced the command queue consumer index (CI doorbell).
 * Record the new CI, then drain as much of the pending command list as
 * now fits in the ring.  Commands are posted strictly in order: the
 * walk stops at the first command that still does not fit.
 */
static void
bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfa_msgq_cmd_entry *cmd;
	int posted = 0;

	cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);

	/* Walk through pending list to see if the command can be posted */
	while (!list_empty(&cmdq->pending_q)) {
		cmd =
		(struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q);
		if (ntohs(cmd->msg_hdr->num_entries) <=
			BFA_MSGQ_FREE_CNT(cmdq)) {
			list_del(&cmd->qe);
			__cmd_copy(cmdq, cmd);
			posted = 1;
			call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		} else {
			break;
		}
	}

	/* Something went into the ring - ring the PI doorbell. */
	if (posted)
		bfa_fsm_send_event(cmdq, CMDQ_E_POST);
}
260 
261 static void
262 bfa_msgq_cmdq_copy_next(void *arg)
263 {
264 	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
265 
266 	if (cmdq->bytes_to_copy)
267 		bfa_msgq_cmdq_copy_rsp(cmdq);
268 }
269 
270 static void
271 bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
272 {
273 	struct bfi_msgq_i2h_cmdq_copy_req *req =
274 		(struct bfi_msgq_i2h_cmdq_copy_req *)mb;
275 
276 	cmdq->token = 0;
277 	cmdq->offset = ntohs(req->offset);
278 	cmdq->bytes_to_copy = ntohs(req->len);
279 	bfa_msgq_cmdq_copy_rsp(cmdq);
280 }
281 
/* Send one chunk of the command queue back to the firmware as a
 * BFI_MSGQ_H2I_CMDQ_COPY_RSP mailbox message.  The window to copy
 * (offset/bytes_to_copy) was set up by bfa_msgq_cmdq_copy_req(); the
 * token sequences successive chunks.  When the mailbox message
 * completes, bfa_msgq_cmdq_copy_next() continues with the next chunk
 * if any bytes remain.
 */
static void
bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
	int copied;
	u8 *addr = (u8 *)cmdq->addr.kva;

	memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
	bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
	rsp->mh.mtag.i2htok = htons(cmdq->token);
	/* Cap each chunk at the mailbox payload size. */
	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
		cmdq->bytes_to_copy;
	addr += cmdq->offset;
	memcpy(rsp->data, addr, copied);

	cmdq->token++;
	cmdq->offset += copied;
	cmdq->bytes_to_copy -= copied;

	/* If the message was not queued for deferred delivery, drive the
	 * next chunk immediately.
	 */
	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
				bfa_msgq_cmdq_copy_next, cmdq)) {
		bfa_msgq_cmdq_copy_next(cmdq);
	}
}
307 
308 static void
309 bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
310 {
311 	cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
312 	INIT_LIST_HEAD(&cmdq->pending_q);
313 	cmdq->msgq = msgq;
314 	bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
315 }
316 
317 static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);
318 
/* Events handled by the response queue (RSPQ) state machine. */
enum rspq_event {
	RSPQ_E_START			= 1,	/* IOC enabled; bring queue up */
	RSPQ_E_STOP			= 2,	/* IOC disabled */
	RSPQ_E_FAIL			= 3,	/* IOC failure */
	RSPQ_E_RESP			= 4,	/* responses consumed; CI moved */
	RSPQ_E_INIT_RESP		= 5,	/* f/w acked the INIT request */
	RSPQ_E_DB_READY			= 6,	/* doorbell mailbox completed */
};
327 
/* RSPQ state machine states; bfa_fsm_state_decl() generates the
 * rspq_sm_<state>_entry()/rspq_sm_<state>() handler prototypes used below.
 */
bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
			enum rspq_event);
bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
			enum rspq_event);
334 
335 static void
336 rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
337 {
338 	rspq->producer_index = 0;
339 	rspq->consumer_index = 0;
340 	rspq->flags = 0;
341 }
342 
343 static void
344 rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
345 {
346 	switch (event) {
347 	case RSPQ_E_START:
348 		bfa_fsm_set_state(rspq, rspq_sm_init_wait);
349 		break;
350 
351 	case RSPQ_E_STOP:
352 	case RSPQ_E_FAIL:
353 		/* No-op */
354 		break;
355 
356 	default:
357 		bfa_sm_fault(event);
358 	}
359 }
360 
/* Entry action for "init_wait": drop this queue's count on the shared
 * init wait-counter; once both cmdq and rspq have checked in, the
 * counter's resume callback (bfa_msgq_init) sends the INIT request.
 */
static void
rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
{
	bfa_wc_down(&rspq->msgq->init_wc);
}
366 
367 static void
368 rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
369 {
370 	switch (event) {
371 	case RSPQ_E_FAIL:
372 	case RSPQ_E_STOP:
373 		bfa_fsm_set_state(rspq, rspq_sm_stopped);
374 		break;
375 
376 	case RSPQ_E_INIT_RESP:
377 		bfa_fsm_set_state(rspq, rspq_sm_ready);
378 		break;
379 
380 	default:
381 		bfa_sm_fault(event);
382 	}
383 }
384 
/* Entry action for "ready": no work; the queue idles here until a
 * response arrives or the IOC changes state.
 */
static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}
389 
390 static void
391 rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
392 {
393 	switch (event) {
394 	case RSPQ_E_STOP:
395 	case RSPQ_E_FAIL:
396 		bfa_fsm_set_state(rspq, rspq_sm_stopped);
397 		break;
398 
399 	case RSPQ_E_RESP:
400 		bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
401 		break;
402 
403 	default:
404 		bfa_sm_fault(event);
405 	}
406 }
407 
/* Entry action for "dbell_wait": push the consumer index to firmware,
 * unless the IOC is disabled (no mailbox traffic is possible then).
 */
static void
rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
{
	if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
		bfa_msgq_rspq_dbell(rspq);
}
414 
415 static void
416 rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
417 {
418 	switch (event) {
419 	case RSPQ_E_STOP:
420 	case RSPQ_E_FAIL:
421 		bfa_fsm_set_state(rspq, rspq_sm_stopped);
422 		break;
423 
424 	case RSPQ_E_RESP:
425 		rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
426 		break;
427 
428 	case RSPQ_E_DB_READY:
429 		if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
430 			rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
431 			bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
432 		} else
433 			bfa_fsm_set_state(rspq, rspq_sm_ready);
434 		break;
435 
436 	default:
437 		bfa_sm_fault(event);
438 	}
439 }
440 
441 static void
442 bfa_msgq_rspq_dbell_ready(void *arg)
443 {
444 	struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;
445 	bfa_fsm_send_event(rspq, RSPQ_E_DB_READY);
446 }
447 
/* Ring the response queue consumer-index doorbell: build a
 * BFI_MSGQ_H2I_DOORBELL_CI mailbox message carrying the current CI and
 * hand it to the IOC mailbox.  If bfa_nw_ioc_mbox_queue() returns
 * false (message not queued for deferred delivery), the ready callback
 * is invoked inline so the FSM still sees RSPQ_E_DB_READY.
 */
static void
bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.rspq_ci = htons(rspq->consumer_index);

	if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
				bfa_msgq_rspq_dbell_ready, rspq)) {
		bfa_msgq_rspq_dbell_ready(rspq);
	}
}
464 
/* Firmware advanced the response queue producer index (PI doorbell).
 * Consume every entry between the local consumer index and the new PI,
 * dispatching each message to the handler registered for its message
 * class.  The walk stops early on an out-of-range class or a class with
 * no registered handler.  RSPQ_E_RESP then drives the CI doorbell to
 * tell firmware how far we consumed.
 */
static void
bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfi_msgq_mhdr *msghdr;
	int num_entries;
	int mc;
	u8 *rspq_qe;

	rspq->producer_index = ntohs(dbell->idx.rspq_pi);

	while (rspq->consumer_index != rspq->producer_index) {
		rspq_qe = (u8 *)rspq->addr.kva;
		rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
		msghdr = (struct bfi_msgq_mhdr *)rspq_qe;

		mc = msghdr->msg_class;
		num_entries = ntohs(msghdr->num_entries);

		/* Unknown class or no handler registered: stop here. */
		if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
			break;

		(rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);

		/* A message may span multiple ring entries. */
		BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
				rspq->depth);
	}

	bfa_fsm_send_event(rspq, RSPQ_E_RESP);
}
495 
496 static void
497 bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
498 {
499 	rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY;
500 	rspq->msgq = msgq;
501 	bfa_fsm_set_state(rspq, rspq_sm_stopped);
502 }
503 
/* Firmware acknowledged the MSGQ INIT request: move both queue state
 * machines out of their init_wait states.  The mailbox message itself
 * carries no data we use here.
 */
static void
bfa_msgq_init_rsp(struct bfa_msgq *msgq,
		 struct bfi_mbmsg *mb)
{
	bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
	bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
}
511 
/* Resume callback of init_wc: runs once both cmdq and rspq have entered
 * their init_wait states.  Builds the BFI_MSGQ_H2I_INIT_REQ message
 * describing both rings (DMA address + depth) and queues it on the IOC
 * mailbox; firmware answers with BFI_MSGQ_I2H_INIT_RSP.
 */
static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	/* No completion callback needed; the reply arrives via the ISR. */
	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}
530 
531 static void
532 bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
533 {
534 	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;
535 
536 	switch (msg->mh.msg_id) {
537 	case BFI_MSGQ_I2H_INIT_RSP:
538 		bfa_msgq_init_rsp(msgq, msg);
539 		break;
540 
541 	case BFI_MSGQ_I2H_DOORBELL_PI:
542 		bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
543 		break;
544 
545 	case BFI_MSGQ_I2H_DOORBELL_CI:
546 		bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
547 		break;
548 
549 	case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
550 		bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
551 		break;
552 
553 	default:
554 		BUG_ON(1);
555 	}
556 }
557 
/* IOC event notification.
 *
 * ENABLED: arm the init wait-counter, take one count per queue before
 * starting each FSM (each queue drops its count from its init_wait
 * entry action), then bfa_wc_wait() releases the counter's own
 * reference - presumably firing bfa_msgq_init() once both queues have
 * checked in; confirm against the bfa_wc implementation.
 * DISABLED/FAILED: stop or fail both queue state machines.
 */
static void
bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (event) {
	case BFA_IOC_E_ENABLED:
		bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
		bfa_wc_wait(&msgq->init_wc);
		break;

	case BFA_IOC_E_DISABLED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
		break;

	case BFA_IOC_E_FAILED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
		break;

	default:
		break;
	}
}
587 
588 u32
589 bfa_msgq_meminfo(void)
590 {
591 	return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) +
592 		roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ);
593 }
594 
595 void
596 bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
597 {
598 	msgq->cmdq.addr.kva = kva;
599 	msgq->cmdq.addr.pa  = pa;
600 
601 	kva += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
602 	pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
603 
604 	msgq->rspq.addr.kva = kva;
605 	msgq->rspq.addr.pa = pa;
606 }
607 
/* Attach the message queue to its IOC: initialize both sub-queues,
 * register the MSGQ mailbox ISR, and sign up for IOC state
 * notifications (which drive start/stop of the queue FSMs).
 */
void
bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
{
	msgq->ioc    = ioc;

	bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
	bfa_msgq_rspq_attach(&msgq->rspq, msgq);

	bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
	bfa_q_qe_init(&msgq->ioc_notify);
	bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
	bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}
621 
622 void
623 bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
624 		bfa_msgq_mcfunc_t cbfn, void *cbarg)
625 {
626 	msgq->rspq.rsphdlr[mc].cbfn	= cbfn;
627 	msgq->rspq.rsphdlr[mc].cbarg	= cbarg;
628 }
629 
630 void
631 bfa_msgq_cmd_post(struct bfa_msgq *msgq,  struct bfa_msgq_cmd_entry *cmd)
632 {
633 	if (ntohs(cmd->msg_hdr->num_entries) <=
634 		BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
635 		__cmd_copy(&msgq->cmdq, cmd);
636 		call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
637 		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
638 	} else {
639 		list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
640 	}
641 }
642 
/* Copy buf_len bytes of raw response-queue payload, starting at the
 * current consumer index, into @buf.  The source wraps around the ring
 * in whole BFI_MSGQ_RSP_ENTRY_SIZE strides; the ring's consumer index
 * itself is not advanced (a local copy is used).
 *
 * NOTE(review): dst advances by BFI_MSGQ_RSP_ENTRY_SIZE rather than
 * to_copy; the two differ only on the final short chunk, after which
 * dst is no longer used, so behavior is unaffected.
 */
void
bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
{
	struct bfa_msgq_rspq *rspq = &msgq->rspq;
	size_t len = buf_len;
	size_t to_copy;
	int ci;
	u8 *src, *dst;

	ci = rspq->consumer_index;
	src = (u8 *)rspq->addr.kva;
	src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	dst = buf;

	while (len) {
		to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ?
				len : BFI_MSGQ_RSP_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		dst += BFI_MSGQ_RSP_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth);
		src = (u8 *)rspq->addr.kva;
		src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	}
}
668