1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 #include "bna.h"
19 
20 static inline int
21 ethport_can_be_up(struct bna_ethport *ethport)
22 {
23 	int ready = 0;
24 	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
25 		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
26 			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
27 			 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
28 	else
29 		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
30 			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
31 			 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
32 	return ready;
33 }
34 
35 #define ethport_is_up ethport_can_be_up
36 
/* Events fed into the ethport state machine (see bna_ethport_sm_*) */
enum bna_ethport_event {
	ETHPORT_E_START			= 1,
	ETHPORT_E_STOP			= 2,
	ETHPORT_E_FAIL			= 3,
	ETHPORT_E_UP			= 4,
	ETHPORT_E_DOWN			= 5,
	ETHPORT_E_FWRESP_UP_OK		= 6,	/* fw accepted the up request */
	ETHPORT_E_FWRESP_DOWN		= 7,	/* fw completed a down request */
	ETHPORT_E_FWRESP_UP_FAIL	= 8,	/* fw rejected the up request */
};
47 
/* Events fed into the enet state machine (see bna_enet_sm_*) */
enum bna_enet_event {
	ENET_E_START			= 1,
	ENET_E_STOP			= 2,
	ENET_E_FAIL			= 3,
	ENET_E_PAUSE_CFG		= 4,	/* pause config change requested */
	ENET_E_MTU_CFG			= 5,	/* MTU change requested */
	ENET_E_FWRESP_PAUSE		= 6,	/* fw ack for a pause request */
	ENET_E_CHLD_STOPPED		= 7,	/* ethport/tx/rx children stopped */
};
57 
/* Events fed into the ioceth state machine */
enum bna_ioceth_event {
	IOCETH_E_ENABLE			= 1,
	IOCETH_E_DISABLE		= 2,
	IOCETH_E_IOC_RESET		= 3,
	IOCETH_E_IOC_FAILED		= 4,
	IOCETH_E_IOC_READY		= 5,
	IOCETH_E_ENET_ATTR_RESP		= 6,	/* fw answered the attr query */
	IOCETH_E_ENET_STOPPED		= 7,
	IOCETH_E_IOC_DISABLED		= 8,
};
68 
/*
 * Copy one hardware stats section from the DMA-visible area
 * (hw_stats_kva) into the driver's shadow copy, byte-swapping each
 * big-endian 64-bit counter.  Relies on u64 *stats_src, u64 *stats_dst,
 * int count and int i being in scope at the expansion site.
 *
 * Fix: the original ended with a stray line continuation after
 * "} while (0)", silently appending the following source line to the
 * macro body.
 */
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
77 
78 /*
79  * FW response handlers
80  */
81 
82 static void
83 bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
84 				struct bfi_msgq_mhdr *msghdr)
85 {
86 	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
87 
88 	if (ethport_can_be_up(ethport))
89 		bfa_fsm_send_event(ethport, ETHPORT_E_UP);
90 }
91 
92 static void
93 bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
94 				struct bfi_msgq_mhdr *msghdr)
95 {
96 	int ethport_up = ethport_is_up(ethport);
97 
98 	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
99 
100 	if (ethport_up)
101 		bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
102 }
103 
/*
 * Firmware response to a port admin up/down request.  The saved request
 * in bfi_enet_cmd tells us which direction was asked for.
 */
static void
bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_enable_req *admin_req =
		&ethport->bfi_enet_cmd.admin_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (admin_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			/* Up request failed; treat the port as not enabled */
			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		/* Down responses are not error-checked; force link down */
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
		break;
	}
}
129 
/*
 * Firmware response to a diag loopback enable/disable request; mirrors
 * bna_bfi_ethport_admin_rsp() for the loopback path.
 */
static void
bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_diag_lb_req *diag_lb_req =
		&ethport->bfi_enet_cmd.lpbk_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (diag_lb_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			/* Loopback-up failed; drop the admin-up flag */
			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		break;
	}
}
153 
/* Firmware ack for a pause configuration request; forward to the FSM */
static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}
159 
/*
 * Firmware response carrying the enet attributes (queue/CAM limits).
 */
static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		/* num_txq and num_rxp both derive from the single max_cfg
		 * field; presumably fw reports one combined limit — confirm
		 * against the bfi_enet_attr_rsp definition.
		 */
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
181 
/*
 * Firmware response to a stats-get request.  Copies (and byte-swaps) the
 * fixed hardware sections, then scatters the per-Rxf/Txf sections: the
 * fw packs stats only for the functions present in the request masks,
 * while the SW copy is indexed by function id, so absent entries are
 * zeroed and present entries are consumed sequentially from stats_src.
 */
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);	/* rlb shares the rad stats layout */
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Request complete; allow the next stats-get and notify bnad */
	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
235 
236 static void
237 bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
238 			struct bfi_msgq_mhdr *msghdr)
239 {
240 	ethport->link_status = BNA_LINK_UP;
241 
242 	/* Dispatch events */
243 	ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
244 }
245 
246 static void
247 bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
248 				struct bfi_msgq_mhdr *msghdr)
249 {
250 	ethport->link_status = BNA_LINK_DOWN;
251 
252 	/* Dispatch events */
253 	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
254 }
255 
/* Handle an error interrupt: clear a pending halt, then run the IOC
 * error ISR.
 */
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}
264 
265 void
266 bna_mbox_handler(struct bna *bna, u32 intr_status)
267 {
268 	if (BNA_IS_ERR_INTR(bna, intr_status)) {
269 		bna_err_handler(bna, intr_status);
270 		return;
271 	}
272 	if (BNA_IS_MBOX_INTR(bna, intr_status))
273 		bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
274 }
275 
/*
 * Demultiplex firmware responses and AENs arriving on the message queue
 * to the owning object (Tx, Rx, ethport, enet, ioceth or stats module).
 * Rx/Tx ids are resolved via enet_id; a NULL lookup result means the
 * object no longer exists and the message is dropped.
 */
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	/* All rxf configuration responses share one handler */
	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}
380 
381 /* ETHPORT */
382 
/* Fire-and-clear the ethport stop completion callback (one-shot) */
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)
392 
/* Fire-and-clear the admin-up completion callback with the given status */
#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
402 
/* Build and post a PORT_ADMIN_UP request (enable) to firmware */
static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
419 
/*
 * Build and post a port admin down request.  NOTE: the same
 * PORT_ADMIN_UP_REQ opcode is used for both directions; only the
 * enable field distinguishes up from down.
 */
static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
436 
/* Build and post a diag loopback enable request to firmware */
static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	/* NOTE(review): INTERNAL loopback selects OPMODE_EXT and everything
	 * else gets OPMODE_CBL (cable) — confirm this mapping against the
	 * BFI opmode definitions.
	 */
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
457 
/* Build and post a diag loopback disable request to firmware */
static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
474 
475 static void
476 bna_bfi_ethport_up(struct bna_ethport *ethport)
477 {
478 	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
479 		bna_bfi_ethport_admin_up(ethport);
480 	else
481 		bna_bfi_ethport_lpbk_up(ethport);
482 }
483 
484 static void
485 bna_bfi_ethport_down(struct bna_ethport *ethport)
486 {
487 	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
488 		bna_bfi_ethport_admin_down(ethport);
489 	else
490 		bna_bfi_ethport_lpbk_down(ethport);
491 }
492 
/*
 * Ethport state machine states: stopped -> down -> up_resp_wait -> up,
 * with down_resp_wait and last_resp_wait covering the windows where a
 * firmware request is still in flight.
 */
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
505 
/* Entering stopped: complete any pending stop callback */
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}
511 
/* Stopped state: only START moves the port forward */
static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		/* Already stopped; just ack the caller */
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
538 
static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
	/* No entry action; wait for ETHPORT_E_UP */
}
543 
/* Down state: started but not yet asked to come up */
static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		/* Kick off the firmware up request and wait for the response */
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
566 
static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
	/* No entry action; the up request was posted by the caller */
}
571 
/* Waiting for the firmware response to an up request */
static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
610 
static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN (the up request is still outstanding; the down is
	 * issued once its response arrives).
	 */
}
620 
/* A down is pending behind an outstanding firmware request */
static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		/* Down cancelled before it was ever sent */
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}
652 
static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
	/* No entry action; the adminup callback fired on FWRESP_UP_OK */
}
657 
/* Up state: port is enabled in firmware */
static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
681 
static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
	/* No entry action; waiting for the final firmware response */
}
686 
/* Stop requested while a firmware response is still outstanding */
static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
718 
/*
 * One-time ethport setup; leaves the FSM in the stopped state.
 * Flags are OR-ed in — presumably the containing structure is
 * zero-initialized by the caller; confirm at the call site.
 */
static void
bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
{
	ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
	ethport->bna = bna;

	ethport->link_status = BNA_LINK_DOWN;
	ethport->link_cbfn = bnad_cb_ethport_link_status;

	ethport->rx_started_count = 0;

	ethport->stop_cbfn = NULL;
	ethport->adminup_cbfn = NULL;

	bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
}
735 
736 static void
737 bna_ethport_uninit(struct bna_ethport *ethport)
738 {
739 	ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
740 	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
741 
742 	ethport->bna = NULL;
743 }
744 
/* Kick the ethport FSM out of the stopped state */
static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}
750 
/* Ethport stop completion: release the enet's child-stop wait counter */
static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
756 
/* Ask the ethport FSM to stop; completion is reported via stop_cbfn */
static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}
763 
/* IOC failure path: force link down, notify bnad, and fail the FSM */
static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}
776 
/*
 * An Rx object started.  On the first started Rx, mark RX_STARTED and
 * bring the port up if all up conditions hold.
 * (NOTE(review): the original header said "Should be called only when
 * ethport is disabled" — that does not match the visible logic; verify
 * the intended precondition with the callers.)
 */
void
bna_ethport_cb_rx_started(struct bna_ethport *ethport)
{
	ethport->rx_started_count++;

	if (ethport->rx_started_count == 1) {
		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;

		if (ethport_can_be_up(ethport))
			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
	}
}
790 
/*
 * An Rx object stopped.  When the last one goes, clear RX_STARTED and
 * take the port down if it was up.  The up state is sampled before the
 * flag is cleared, since ethport_is_up() reads RX_STARTED.
 */
void
bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->rx_started_count--;

	if (ethport->rx_started_count == 0) {
		ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;

		if (ethport_up)
			bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
	}
}
805 
806 /* ENET */
807 
/* Start all enet children (ethport, Tx mod, Rx mod), mapping the enet
 * type to the matching Tx/Rx types.
 */
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)
820 
/* Stop all enet children, tracking completion with the wait counter:
 * one wc reference is taken per child before its stop is issued, and
 * the final bfa_wc_wait() drops the initial reference so the callback
 * fires once every child has reported stopped.
 */
#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
838 
/* Propagate an IOC failure to all enet children */
#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)
845 
/* Restart only the Rx module (used after an MTU-change Rx stop) */
#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)
853 
/* Stop only the Rx module, signalling completion via chld_stop_wc */
#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
864 
/* Fire-and-clear the enet stop completion callback (one-shot) */
#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)
877 
/* Fire-and-clear the pause configuration completion callback */
#define call_enet_pause_cbfn(enet)					\
do {									\
	if ((enet)->pause_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->pause_cbfn;				\
		(enet)->pause_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
887 
/* Fire-and-clear the MTU configuration completion callback */
#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
897 
898 static void bna_enet_cb_chld_stopped(void *arg);
899 static void bna_bfi_pause_set(struct bna_enet *enet);
900 
/*
 * Enet state machine states: stopped -> pause_init_wait -> started,
 * with cfg_wait / cfg_stop_wait / chld_stop_wait / last_resp_wait
 * covering reconfiguration and shutdown windows.
 */
bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);
915 
/* Entering stopped: flush any pending pause/MTU/stop callbacks */
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}
923 
/* Stopped state: config requests complete immediately, START moves on */
static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op */
		break;

	case ENET_E_PAUSE_CFG:
		call_enet_pause_cbfn(enet);
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/**
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
960 
/* Entering pause_init_wait: push the initial pause config to firmware */
static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}
966 
/* Waiting for the firmware ack of the initial pause configuration */
static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		/* Remember the change; it is re-sent once the ack arrives */
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* No-op */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1004 
/* Entering last_resp_wait: any pending pause change is abandoned */
static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}
1010 
/* Stop requested while a pause request is still outstanding */
static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1025 
static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/**
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
}
1036 
/* Started state: children are running; config changes enter cfg_wait */
static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		/* MTU change requires Rx to be stopped and restarted */
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1065 
static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
	/* No entry action; the config operation was issued by the caller */
}
1070 
/*
 * A pause or MTU reconfiguration is in flight.  Further config requests
 * are latched in flags and replayed, one at a time, when the current
 * operation completes (pause before MTU).
 */
static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		/* Rx stopped for an MTU change; restart it, then drain
		 * any latched config just like a pause ack.
		 */
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1116 
1117 static void
1118 bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
1119 {
1120 	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1121 	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1122 }
1123 
1124 static void
1125 bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
1126 				enum bna_enet_event event)
1127 {
1128 	switch (event) {
1129 	case ENET_E_FAIL:
1130 		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1131 		bna_enet_chld_fail(enet);
1132 		break;
1133 
1134 	case ENET_E_FWRESP_PAUSE:
1135 	case ENET_E_CHLD_STOPPED:
1136 		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
1137 		break;
1138 
1139 	default:
1140 		bfa_sm_fault(event);
1141 	}
1142 }
1143 
static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	/* Kick off the stop of all child objects (Tx/Rx modules) */
	bna_enet_chld_stop(enet);
}
1149 
1150 static void
1151 bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
1152 				enum bna_enet_event event)
1153 {
1154 	switch (event) {
1155 	case ENET_E_FAIL:
1156 		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1157 		bna_enet_chld_fail(enet);
1158 		break;
1159 
1160 	case ENET_E_CHLD_STOPPED:
1161 		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1162 		break;
1163 
1164 	default:
1165 		bfa_sm_fault(event);
1166 	}
1167 }
1168 
/* Build and post a BFI_ENET_H2I_SET_PAUSE_REQ message carrying the
 * current pause configuration to firmware via the message queue.
 * Completion arrives later as ENET_E_FWRESP_PAUSE.
 */
static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	/* num_entries is in big-endian wire format */
	pause_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}
1185 
1186 static void
1187 bna_enet_cb_chld_stopped(void *arg)
1188 {
1189 	struct bna_enet *enet = (struct bna_enet *)arg;
1190 
1191 	bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
1192 }
1193 
1194 static void
1195 bna_enet_init(struct bna_enet *enet, struct bna *bna)
1196 {
1197 	enet->bna = bna;
1198 	enet->flags = 0;
1199 	enet->mtu = 0;
1200 	enet->type = BNA_ENET_T_REGULAR;
1201 
1202 	enet->stop_cbfn = NULL;
1203 	enet->stop_cbarg = NULL;
1204 
1205 	enet->pause_cbfn = NULL;
1206 
1207 	enet->mtu_cbfn = NULL;
1208 
1209 	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1210 }
1211 
1212 static void
1213 bna_enet_uninit(struct bna_enet *enet)
1214 {
1215 	enet->flags = 0;
1216 
1217 	enet->bna = NULL;
1218 }
1219 
1220 static void
1221 bna_enet_start(struct bna_enet *enet)
1222 {
1223 	enet->flags |= BNA_ENET_F_IOCETH_READY;
1224 	if (enet->flags & BNA_ENET_F_ENABLED)
1225 		bfa_fsm_send_event(enet, ENET_E_START);
1226 }
1227 
1228 static void
1229 bna_ioceth_cb_enet_stopped(void *arg)
1230 {
1231 	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1232 
1233 	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
1234 }
1235 
1236 static void
1237 bna_enet_stop(struct bna_enet *enet)
1238 {
1239 	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
1240 	enet->stop_cbarg = &enet->bna->ioceth;
1241 
1242 	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
1243 	bfa_fsm_send_event(enet, ENET_E_STOP);
1244 }
1245 
/* IOC failure path: mark the IOC as no longer ready and fail the FSM. */
static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}
1252 
/* Tx module stop completion: decrement the child-stop wait counter;
 * when it reaches zero the ENET_E_CHLD_STOPPED event is generated.
 */
void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
1258 
/* Rx module stop completion: decrement the child-stop wait counter. */
void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
1264 
/* Return the currently configured MTU (0 until bna_enet_mtu_set()). */
int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}
1270 
1271 void
1272 bna_enet_enable(struct bna_enet *enet)
1273 {
1274 	if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
1275 		return;
1276 
1277 	enet->flags |= BNA_ENET_F_ENABLED;
1278 
1279 	if (enet->flags & BNA_ENET_F_IOCETH_READY)
1280 		bfa_fsm_send_event(enet, ENET_E_START);
1281 }
1282 
1283 void
1284 bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
1285 		 void (*cbfn)(void *))
1286 {
1287 	if (type == BNA_SOFT_CLEANUP) {
1288 		(*cbfn)(enet->bna->bnad);
1289 		return;
1290 	}
1291 
1292 	enet->stop_cbfn = cbfn;
1293 	enet->stop_cbarg = enet->bna->bnad;
1294 
1295 	enet->flags &= ~BNA_ENET_F_ENABLED;
1296 
1297 	bfa_fsm_send_event(enet, ENET_E_STOP);
1298 }
1299 
/* Apply a new pause configuration; cbfn is invoked when the firmware
 * acknowledges the change (or immediately if the FSM short-circuits).
 */
void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config,
		      void (*cbfn)(struct bnad *))
{
	enet->pause_config = *pause_config;

	enet->pause_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}
1311 
/* Set a new MTU; applying it requires an Rx restart, driven by the
 * ENET_E_MTU_CFG event. cbfn is invoked on completion.
 */
void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}
1322 
/* Retrieve the adapter's factory (permanent) MAC address from the IOC. */
void
bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
{
	*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}
1328 
1329 /* IOCETH */
1330 
/* Enable mailbox interrupts: clear any latched status first, then let
 * both the bnad layer and the hardware unmask the mbox interrupt.
 */
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

/* Disable mailbox interrupts at the hardware and the bnad layer. */
#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)

/* Invoke and clear the one-shot ioceth stop-completion callback.
 * The callback fields are cleared before the call so a re-arm from
 * within the callback is not lost.
 */
#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

/* Stats module lifecycle helpers: the module holds no resources, so
 * uninit is a no-op and start/stop/fail just maintain readiness flags.
 */
#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

/* On IOC failure also clear any in-flight get/clear busy markers. */
#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)

static void bna_bfi_attr_get(struct bna_ioceth *ioceth);

/* ioceth state machine state declarations */
bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
			enum bna_ioceth_event);
1397 
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	/* Report stop completion (no-op if no stop was pending) */
	call_ioceth_stop_cbfn(ioceth);
}
1403 
1404 static void
1405 bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
1406 			enum bna_ioceth_event event)
1407 {
1408 	switch (event) {
1409 	case IOCETH_E_ENABLE:
1410 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
1411 		bfa_nw_ioc_enable(&ioceth->ioc);
1412 		break;
1413 
1414 	case IOCETH_E_DISABLE:
1415 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
1416 		break;
1417 
1418 	case IOCETH_E_IOC_RESET:
1419 		enable_mbox_intr(ioceth);
1420 		break;
1421 
1422 	case IOCETH_E_IOC_FAILED:
1423 		disable_mbox_intr(ioceth);
1424 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1425 		break;
1426 
1427 	default:
1428 		bfa_sm_fault(event);
1429 	}
1430 }
1431 
static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/**
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}
1440 
1441 static void
1442 bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
1443 				enum bna_ioceth_event event)
1444 {
1445 	switch (event) {
1446 	case IOCETH_E_DISABLE:
1447 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1448 		bfa_nw_ioc_disable(&ioceth->ioc);
1449 		break;
1450 
1451 	case IOCETH_E_IOC_RESET:
1452 		enable_mbox_intr(ioceth);
1453 		break;
1454 
1455 	case IOCETH_E_IOC_FAILED:
1456 		disable_mbox_intr(ioceth);
1457 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1458 		break;
1459 
1460 	case IOCETH_E_IOC_READY:
1461 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
1462 		break;
1463 
1464 	default:
1465 		bfa_sm_fault(event);
1466 	}
1467 }
1468 
static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	/* Query firmware for enet attributes (txq/rxp/cam limits) */
	bna_bfi_attr_get(ioceth);
}
1474 
1475 static void
1476 bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
1477 				enum bna_ioceth_event event)
1478 {
1479 	switch (event) {
1480 	case IOCETH_E_DISABLE:
1481 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
1482 		break;
1483 
1484 	case IOCETH_E_IOC_FAILED:
1485 		disable_mbox_intr(ioceth);
1486 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1487 		break;
1488 
1489 	case IOCETH_E_ENET_ATTR_RESP:
1490 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
1491 		break;
1492 
1493 	default:
1494 		bfa_sm_fault(event);
1495 	}
1496 }
1497 
static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	/* IOC is fully up: start dependents and tell the driver layer */
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}
1505 
1506 static void
1507 bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
1508 {
1509 	switch (event) {
1510 	case IOCETH_E_DISABLE:
1511 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
1512 		break;
1513 
1514 	case IOCETH_E_IOC_FAILED:
1515 		disable_mbox_intr(ioceth);
1516 		bna_enet_fail(&ioceth->bna->enet);
1517 		bna_stats_mod_fail(&ioceth->bna->stats_mod);
1518 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1519 		break;
1520 
1521 	default:
1522 		bfa_sm_fault(event);
1523 	}
1524 }
1525 
static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
	/* Nothing to do: just wait for the outstanding firmware response */
}
1530 
1531 static void
1532 bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
1533 				enum bna_ioceth_event event)
1534 {
1535 	switch (event) {
1536 	case IOCETH_E_IOC_FAILED:
1537 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1538 		disable_mbox_intr(ioceth);
1539 		bfa_nw_ioc_disable(&ioceth->ioc);
1540 		break;
1541 
1542 	case IOCETH_E_ENET_ATTR_RESP:
1543 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1544 		bfa_nw_ioc_disable(&ioceth->ioc);
1545 		break;
1546 
1547 	default:
1548 		bfa_sm_fault(event);
1549 	}
1550 }
1551 
static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	/* Quiesce stats first, then ask the enet to stop; completion
	 * comes back as IOCETH_E_ENET_STOPPED.
	 */
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}
1558 
1559 static void
1560 bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
1561 				enum bna_ioceth_event event)
1562 {
1563 	switch (event) {
1564 	case IOCETH_E_IOC_FAILED:
1565 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1566 		disable_mbox_intr(ioceth);
1567 		bna_enet_fail(&ioceth->bna->enet);
1568 		bna_stats_mod_fail(&ioceth->bna->stats_mod);
1569 		bfa_nw_ioc_disable(&ioceth->ioc);
1570 		break;
1571 
1572 	case IOCETH_E_ENET_STOPPED:
1573 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1574 		bfa_nw_ioc_disable(&ioceth->ioc);
1575 		break;
1576 
1577 	default:
1578 		bfa_sm_fault(event);
1579 	}
1580 }
1581 
static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
	/* bfa_nw_ioc_disable() was already issued by the previous state */
}
1586 
1587 static void
1588 bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
1589 				enum bna_ioceth_event event)
1590 {
1591 	switch (event) {
1592 	case IOCETH_E_IOC_DISABLED:
1593 		disable_mbox_intr(ioceth);
1594 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
1595 		break;
1596 
1597 	case IOCETH_E_ENET_STOPPED:
1598 		/* This event is received due to enet failing */
1599 		/* No-op */
1600 		break;
1601 
1602 	default:
1603 		bfa_sm_fault(event);
1604 	}
1605 }
1606 
static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	/* Inform the driver layer so it can tear down netdev state */
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}
1612 
1613 static void
1614 bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
1615 			enum bna_ioceth_event event)
1616 {
1617 	switch (event) {
1618 	case IOCETH_E_DISABLE:
1619 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1620 		bfa_nw_ioc_disable(&ioceth->ioc);
1621 		break;
1622 
1623 	case IOCETH_E_IOC_RESET:
1624 		enable_mbox_intr(ioceth);
1625 		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
1626 		break;
1627 
1628 	case IOCETH_E_IOC_FAILED:
1629 		break;
1630 
1631 	default:
1632 		bfa_sm_fault(event);
1633 	}
1634 }
1635 
/* Build and post a BFI_ENET_H2I_GET_ATTR_REQ to firmware; the response
 * arrives as IOCETH_E_ENET_ATTR_RESP.
 */
static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	/* num_entries is in big-endian wire format */
	attr_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}
1649 
1650 /* IOC callback functions */
1651 
1652 static void
1653 bna_cb_ioceth_enable(void *arg, enum bfa_status error)
1654 {
1655 	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1656 
1657 	if (error)
1658 		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1659 	else
1660 		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
1661 }
1662 
1663 static void
1664 bna_cb_ioceth_disable(void *arg)
1665 {
1666 	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1667 
1668 	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
1669 }
1670 
1671 static void
1672 bna_cb_ioceth_hbfail(void *arg)
1673 {
1674 	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1675 
1676 	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1677 }
1678 
1679 static void
1680 bna_cb_ioceth_reset(void *arg)
1681 {
1682 	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1683 
1684 	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
1685 }
1686 
/* IOC event callback table handed to bfa_nw_ioc_attach(). Positional
 * initialization: the order presumably matches the enable/disable/
 * hbfail/reset member order of struct bfa_ioc_cbfn — verify against
 * bfa_ioc.h if the struct layout ever changes (NOTE(review):
 * designated initializers would make this robust).
 */
static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
	bna_cb_ioceth_enable,
	bna_cb_ioceth_disable,
	bna_cb_ioceth_hbfail,
	bna_cb_ioceth_reset
};
1693 
1694 static void bna_attr_init(struct bna_ioceth *ioceth)
1695 {
1696 	ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
1697 	ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
1698 	ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
1699 	ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
1700 	ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
1701 	ioceth->attr.fw_query_complete = false;
1702 }
1703 
/* Initialize the ioceth: attach the IOC, carve per-module kva/dma out
 * of the pre-allocated resource blocks (order must match the meminfo
 * accounting in bna_res_req()), and start the FSM in stopped.
 */
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/**
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/**
	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
	 * DMA memory. kva/dma advance through the single COM block; the
	 * increments must stay in sync with bna_res_req()'s size sum.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	/* Defaults until the firmware attribute query completes */
	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
1759 
/* Detach the IOC and sever the back-pointer; ioceth must be stopped. */
static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}
1767 
/* Enable the ioceth. If already ready, just re-notify the driver; a
 * fresh enable is only issued from the stopped state (other states
 * mean an enable/disable is already in progress).
 * NOTE(review): uses a (bfa_fsm_t) cast here vs (bfa_sm_t) in
 * bna_enet_enable() — presumably equivalent; confirm in bfa_cs.h.
 */
void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}
1779 
1780 void
1781 bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
1782 {
1783 	if (type == BNA_SOFT_CLEANUP) {
1784 		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
1785 		return;
1786 	}
1787 
1788 	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
1789 	ioceth->stop_cbarg = ioceth->bna->bnad;
1790 
1791 	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
1792 }
1793 
1794 static void
1795 bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
1796 		  struct bna_res_info *res_info)
1797 {
1798 	int i;
1799 
1800 	ucam_mod->ucmac = (struct bna_mac *)
1801 	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1802 
1803 	INIT_LIST_HEAD(&ucam_mod->free_q);
1804 	for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
1805 		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
1806 		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
1807 	}
1808 
1809 	ucam_mod->bna = bna;
1810 }
1811 
1812 static void
1813 bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
1814 {
1815 	struct list_head *qe;
1816 	int i = 0;
1817 
1818 	list_for_each(qe, &ucam_mod->free_q)
1819 		i++;
1820 
1821 	ucam_mod->bna = NULL;
1822 }
1823 
1824 static void
1825 bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
1826 		  struct bna_res_info *res_info)
1827 {
1828 	int i;
1829 
1830 	mcam_mod->mcmac = (struct bna_mac *)
1831 	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1832 
1833 	INIT_LIST_HEAD(&mcam_mod->free_q);
1834 	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1835 		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
1836 		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
1837 	}
1838 
1839 	mcam_mod->mchandle = (struct bna_mcam_handle *)
1840 	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
1841 
1842 	INIT_LIST_HEAD(&mcam_mod->free_handle_q);
1843 	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1844 		bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
1845 		list_add_tail(&mcam_mod->mchandle[i].qe,
1846 				&mcam_mod->free_handle_q);
1847 	}
1848 
1849 	mcam_mod->bna = bna;
1850 }
1851 
1852 static void
1853 bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
1854 {
1855 	struct list_head *qe;
1856 	int i;
1857 
1858 	i = 0;
1859 	list_for_each(qe, &mcam_mod->free_q) i++;
1860 
1861 	i = 0;
1862 	list_for_each(qe, &mcam_mod->free_handle_q) i++;
1863 
1864 	mcam_mod->bna = NULL;
1865 }
1866 
/* Build and post a BFI_ENET_H2I_STATS_GET_REQ asking firmware to DMA
 * all hardware stats into the host stats buffer. Marks the stats
 * module busy until the response clears it.
 */
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	/* Request every stats category for all active Tx/Rx functions */
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
1888 
/* Describe the fixed (attribute-independent) memory resources bna
 * needs; the caller allocates them before bna_init(). The COM block
 * size must match the claims made in bna_ioceth_init().
 */
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				(bfa_nw_cee_meminfo() +
				 bfa_nw_flash_meminfo() +
				 bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retreiving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory for retreiving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
				ALIGN(sizeof(struct bfi_enet_stats),
					PAGE_SIZE);
}
1922 
/* Describe the per-module memory resources whose sizes depend on the
 * firmware-reported attributes; must run after the attr query so the
 * counts in bna->ioceth.attr are final. Caller allocates before
 * bna_mod_init().
 */
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module;
	 * two RxQs per Rx path
	 */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		attr->num_ucmac * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}
1992 
/* First-stage bna initialization: wire up driver/PCI context, claim the
 * stats DMA buffer, map registers, and init ioceth/enet/ethport.
 * Attribute-dependent modules are initialized later by bna_mod_init().
 */
void
bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	/* Host buffer that firmware DMAs hardware stats into */
	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes diag, cee, sfp, phy_port, msgq */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
2015 
2016 void
2017 bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
2018 {
2019 	bna_tx_mod_init(&bna->tx_mod, bna, res_info);
2020 
2021 	bna_rx_mod_init(&bna->rx_mod, bna, res_info);
2022 
2023 	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
2024 
2025 	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
2026 
2027 	bna->default_mode_rid = BFI_INVALID_RID;
2028 	bna->promisc_rid = BFI_INVALID_RID;
2029 
2030 	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
2031 }
2032 
/* Tear everything down in reverse order of initialization. The module
 * uninits are skipped if bna_mod_init() never ran (INIT_DONE unset).
 */
void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	/* ioceth last: it owns the IOC everything else depends on */
	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
2052 
2053 int
2054 bna_num_txq_set(struct bna *bna, int num_txq)
2055 {
2056 	if (bna->ioceth.attr.fw_query_complete &&
2057 		(num_txq <= bna->ioceth.attr.num_txq)) {
2058 		bna->ioceth.attr.num_txq = num_txq;
2059 		return BNA_CB_SUCCESS;
2060 	}
2061 
2062 	return BNA_CB_FAIL;
2063 }
2064 
2065 int
2066 bna_num_rxp_set(struct bna *bna, int num_rxp)
2067 {
2068 	if (bna->ioceth.attr.fw_query_complete &&
2069 		(num_rxp <= bna->ioceth.attr.num_rxp)) {
2070 		bna->ioceth.attr.num_rxp = num_rxp;
2071 		return BNA_CB_SUCCESS;
2072 	}
2073 
2074 	return BNA_CB_FAIL;
2075 }
2076 
2077 struct bna_mac *
2078 bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
2079 {
2080 	struct list_head *qe;
2081 
2082 	if (list_empty(&ucam_mod->free_q))
2083 		return NULL;
2084 
2085 	bfa_q_deq(&ucam_mod->free_q, &qe);
2086 
2087 	return (struct bna_mac *)qe;
2088 }
2089 
/* Return a unicast MAC entry to the free pool. */
void
bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &ucam_mod->free_q);
}
2095 
2096 struct bna_mac *
2097 bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
2098 {
2099 	struct list_head *qe;
2100 
2101 	if (list_empty(&mcam_mod->free_q))
2102 		return NULL;
2103 
2104 	bfa_q_deq(&mcam_mod->free_q, &qe);
2105 
2106 	return (struct bna_mac *)qe;
2107 }
2108 
/* Return a multicast MAC entry to the free pool. */
void
bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &mcam_mod->free_q);
}
2114 
2115 struct bna_mcam_handle *
2116 bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
2117 {
2118 	struct list_head *qe;
2119 
2120 	if (list_empty(&mcam_mod->free_handle_q))
2121 		return NULL;
2122 
2123 	bfa_q_deq(&mcam_mod->free_handle_q, &qe);
2124 
2125 	return (struct bna_mcam_handle *)qe;
2126 }
2127 
/* Return a multicast handle to the free pool. */
void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
2134 
2135 void
2136 bna_hw_stats_get(struct bna *bna)
2137 {
2138 	if (!bna->stats_mod.ioc_ready) {
2139 		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2140 		return;
2141 	}
2142 	if (bna->stats_mod.stats_get_busy) {
2143 		bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
2144 		return;
2145 	}
2146 
2147 	bna_bfi_stats_get(bna);
2148 }
2149