/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

/* MSGQ module source file. */

#include "bfi.h"
#include "bfa_msgq.h"
#include "bfa_ioc.h"

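/* Invoke and then clear the completion callback of a command queue entry. */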
#define call_cmdq_ent_cbfn(_cmdq_ent, _status)				\
do {									\
	bfa_msgq_cmdcbfn_t cbfn;					\
	void *cbarg;							\
	cbfn = (_cmdq_ent)->cbfn;					\
	cbarg = (_cmdq_ent)->cbarg;					\
	(_cmdq_ent)->cbfn = NULL;					\
	(_cmdq_ent)->cbarg = NULL;					\
	if (cbfn)							\
		cbfn(cbarg, (_status));					\
} while (0)

static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);

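/*
 * Command queue (CMDQ) state machine events. The queue moves from
 * stopped to init_wait on IOC enable, to ready once the firmware
 * acknowledges the init request, and alternates between ready and
 * dbell_wait as commands are posted and the producer-index doorbell
 * is acknowledged.
 */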
enum cmdq_event {
	CMDQ_E_START			= 1,
	CMDQ_E_STOP			= 2,
	CMDQ_E_FAIL			= 3,
	CMDQ_E_POST			= 4,
	CMDQ_E_INIT_RESP		= 5,
	CMDQ_E_DB_READY			= 6,
};

bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
			enum cmdq_event);

static void
cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
{
	struct bfa_msgq_cmd_entry *cmdq_ent;

	cmdq->producer_index = 0;
	cmdq->consumer_index = 0;
	cmdq->flags = 0;
	cmdq->token = 0;
	cmdq->offset = 0;
	cmdq->bytes_to_copy = 0;
	while (!list_empty(&cmdq->pending_q)) {
		bfa_q_deq(&cmdq->pending_q, &cmdq_ent);
		bfa_q_qe_init(&cmdq_ent->qe);
		call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
	}
}

static void
cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_START:
		bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
		break;

	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		/* No-op */
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_wc_down(&cmdq->msgq->init_wc);
}

static void
cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_INIT_RESP:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else {
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}

static void
cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}

static void
cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_DB_READY:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else {
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

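/*
 * Ring the CMDQ producer-index doorbell through the IOC mailbox. The
 * completion callback posts CMDQ_E_DB_READY back to the state machine,
 * either directly when the message is written to the mailbox right away
 * or later when the queued mailbox command is sent.
 */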
static void
bfa_msgq_cmdq_dbell_ready(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
	bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
}

static void
bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.cmdq_pi = htons(cmdq->producer_index);

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
				bfa_msgq_cmdq_dbell_ready, cmdq)) {
		bfa_msgq_cmdq_dbell_ready(cmdq);
	}
}

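/*
 * Copy a command into the circular command queue in
 * BFI_MSGQ_CMD_ENTRY_SIZE chunks, advancing the producer index by one
 * for each queue entry consumed.
 */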
static void
__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
{
	size_t len = cmd->msg_size;
	size_t to_copy;
	u8 *src, *dst;

	src = (u8 *)cmd->msg_hdr;
	dst = (u8 *)cmdq->addr.kva;
	dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);

	while (len) {
		to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
				len : BFI_MSGQ_CMD_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		src += BFI_MSGQ_CMD_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
		dst = (u8 *)cmdq->addr.kva;
		dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
	}
}

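/*
 * The firmware advanced the CMDQ consumer index: record it, copy as
 * many pending commands as now fit into the queue, and signal
 * CMDQ_E_POST so the doorbell is rung again if anything was posted.
 */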
static void
bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfa_msgq_cmd_entry *cmd;
	int posted = 0;

	cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);

	/* Walk through pending list to see if the command can be posted */
	while (!list_empty(&cmdq->pending_q)) {
		cmd = (struct bfa_msgq_cmd_entry *)
			bfa_q_first(&cmdq->pending_q);
		if (ntohs(cmd->msg_hdr->num_entries) <=
			BFA_MSGQ_FREE_CNT(cmdq)) {
			list_del(&cmd->qe);
			__cmd_copy(cmdq, cmd);
			posted = 1;
			call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		} else {
			break;
		}
	}

	if (posted)
		bfa_fsm_send_event(cmdq, CMDQ_E_POST);
}

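/*
 * CMDQ copy protocol: with BFI_MSGQ_I2H_CMDQ_COPY_REQ the firmware asks
 * the host to echo back a region of the command queue. The host replies
 * with one mailbox message per BFI_CMD_COPY_SZ chunk until the requested
 * length has been copied.
 */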
static void
bfa_msgq_cmdq_copy_next(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;

	if (cmdq->bytes_to_copy)
		bfa_msgq_cmdq_copy_rsp(cmdq);
}

static void
bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_cmdq_copy_req *req =
		(struct bfi_msgq_i2h_cmdq_copy_req *)mb;

	cmdq->token = 0;
	cmdq->offset = ntohs(req->offset);
	cmdq->bytes_to_copy = ntohs(req->len);
	bfa_msgq_cmdq_copy_rsp(cmdq);
}

static void
bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
	int copied;
	u8 *addr = (u8 *)cmdq->addr.kva;

	memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
	bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
	rsp->mh.mtag.i2htok = htons(cmdq->token);
	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
		cmdq->bytes_to_copy;
	addr += cmdq->offset;
	memcpy(rsp->data, addr, copied);

	cmdq->token++;
	cmdq->offset += copied;
	cmdq->bytes_to_copy -= copied;

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
				bfa_msgq_cmdq_copy_next, cmdq)) {
		bfa_msgq_cmdq_copy_next(cmdq);
	}
}

static void
bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
{
	cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
	INIT_LIST_HEAD(&cmdq->pending_q);
	cmdq->msgq = msgq;
	bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
}

static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);

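/*
 * Response queue (RSPQ) state machine events. The flow mirrors the
 * command queue: stopped -> init_wait -> ready, with ready and
 * dbell_wait alternating while received responses are acknowledged
 * through the consumer-index doorbell.
 */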
enum rspq_event {
	RSPQ_E_START			= 1,
	RSPQ_E_STOP			= 2,
	RSPQ_E_FAIL			= 3,
	RSPQ_E_RESP			= 4,
	RSPQ_E_INIT_RESP		= 5,
	RSPQ_E_DB_READY			= 6,
};

bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
			enum rspq_event);
bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
			enum rspq_event);

static void
rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
{
	rspq->producer_index = 0;
	rspq->consumer_index = 0;
	rspq->flags = 0;
}

static void
rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_START:
		bfa_fsm_set_state(rspq, rspq_sm_init_wait);
		break;

	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
{
	bfa_wc_down(&rspq->msgq->init_wc);
}

static void
rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_FAIL:
	case RSPQ_E_STOP:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_INIT_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}

static void
rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
{
	if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
		bfa_msgq_rspq_dbell(rspq);
}

static void
rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
		break;

	case RSPQ_E_DB_READY:
		if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
			rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
			bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		} else {
			bfa_fsm_set_state(rspq, rspq_sm_ready);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

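/*
 * Acknowledge consumed responses by ringing the RSPQ consumer-index
 * doorbell through the IOC mailbox; the completion callback posts
 * RSPQ_E_DB_READY back to the state machine.
 */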
static void
bfa_msgq_rspq_dbell_ready(void *arg)
{
	struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;
	bfa_fsm_send_event(rspq, RSPQ_E_DB_READY);
}

static void
bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.rspq_ci = htons(rspq->consumer_index);

	if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
				bfa_msgq_rspq_dbell_ready, rspq)) {
		bfa_msgq_rspq_dbell_ready(rspq);
	}
}

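/*
 * The firmware advanced the RSPQ producer index: walk the new entries,
 * dispatch each message to the handler registered for its message class,
 * advance the consumer index, and signal RSPQ_E_RESP so the new consumer
 * index is acknowledged to the firmware.
 */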
static void
bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfi_msgq_mhdr *msghdr;
	int num_entries;
	int mc;
	u8 *rspq_qe;

	rspq->producer_index = ntohs(dbell->idx.rspq_pi);

	while (rspq->consumer_index != rspq->producer_index) {
		rspq_qe = (u8 *)rspq->addr.kva;
		rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
		msghdr = (struct bfi_msgq_mhdr *)rspq_qe;

		mc = msghdr->msg_class;
		num_entries = ntohs(msghdr->num_entries);

		if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
			break;

		(rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);

		BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
				rspq->depth);
	}

	bfa_fsm_send_event(rspq, RSPQ_E_RESP);
}

static void
bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
{
	rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY;
	rspq->msgq = msgq;
	bfa_fsm_set_state(rspq, rspq_sm_stopped);
}

static void
bfa_msgq_init_rsp(struct bfa_msgq *msgq, struct bfi_mbmsg *mb)
{
	bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
	bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
}

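/*
 * Called when the init wait-counter drops to zero, i.e. both queue state
 * machines have reached init_wait: send BFI_MSGQ_H2I_INIT_REQ with the
 * DMA addresses and depths of the command and response queues.
 */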
static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}

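/* Mailbox ISR: dispatch incoming BFI_MC_MSGQ messages from the firmware. */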
static void
bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (msg->mh.msg_id) {
	case BFI_MSGQ_I2H_INIT_RSP:
		bfa_msgq_init_rsp(msgq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_PI:
		bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_CI:
		bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
		break;

	case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
		bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
		break;

	default:
		BUG_ON(1);
	}
}

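/*
 * IOC event handler: on enable, start both queue state machines and wait
 * for them to reach init_wait before sending the init request; on disable
 * or failure, stop or fail both state machines.
 */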
static void
bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (event) {
	case BFA_IOC_E_ENABLED:
		bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
		bfa_wc_wait(&msgq->init_wc);
		break;

	case BFA_IOC_E_DISABLED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
		break;

	case BFA_IOC_E_FAILED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
		break;

	default:
		break;
	}
}

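/* DMA memory required for the command and response queues, in bytes. */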
u32
bfa_msgq_meminfo(void)
{
	return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) +
		roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ);
}

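/* Carve the CMDQ and RSPQ out of the caller-provided DMA memory block. */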
void
bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
{
	msgq->cmdq.addr.kva = kva;
	msgq->cmdq.addr.pa = pa;

	kva += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
	pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);

	msgq->rspq.addr.kva = kva;
	msgq->rspq.addr.pa = pa;
}

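/*
 * Attach the message queue module to an IOC: set up both queues, register
 * the BFI_MC_MSGQ mailbox handler and the IOC event notification callback.
 */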
void
bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
{
	msgq->ioc = ioc;

	bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
	bfa_msgq_rspq_attach(&msgq->rspq, msgq);

	bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
	bfa_q_qe_init(&msgq->ioc_notify);
	bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
	bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}

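/* Register a response handler for a firmware message class. */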
void
bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
		bfa_msgq_mcfunc_t cbfn, void *cbarg)
{
	msgq->rspq.rsphdlr[mc].cbfn	= cbfn;
	msgq->rspq.rsphdlr[mc].cbarg	= cbarg;
}

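/*
 * Post a command: copy it into the command queue if there is room,
 * otherwise park it on the pending queue until the firmware frees
 * entries (see bfa_msgq_cmdq_ci_update()).
 */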
void
bfa_msgq_cmd_post(struct bfa_msgq *msgq, struct bfa_msgq_cmd_entry *cmd)
{
	if (ntohs(cmd->msg_hdr->num_entries) <=
		BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
		__cmd_copy(&msgq->cmdq, cmd);
		call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
	} else {
		list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
	}
}

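/*
 * Copy a (possibly multi-entry) response from the response queue into a
 * caller-supplied buffer, starting at the current consumer index. The
 * queue's consumer index itself is not advanced here.
 */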
void
bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
{
	struct bfa_msgq_rspq *rspq = &msgq->rspq;
	size_t len = buf_len;
	size_t to_copy;
	int ci;
	u8 *src, *dst;

	ci = rspq->consumer_index;
	src = (u8 *)rspq->addr.kva;
	src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	dst = buf;

	while (len) {
		to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ?
				len : BFI_MSGQ_RSP_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		dst += BFI_MSGQ_RSP_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth);
		src = (u8 *)rspq->addr.kva;
		src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	}
}