/* xref: /openbmc/linux/drivers/scsi/bfa/bfa_ioc.c (revision 7490ca1e) */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_TOV_RECOVER	 BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)              \
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)               \
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)                \
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)               \
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)                 \
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)            \
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
				enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);


/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/*  IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*  IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*  IOC disable request	*/
	IOC_E_DETACH		= 4,	/*  driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*  f/w enabled		*/
	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*  failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*  heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
	IOC_E_HWFAILED		= 12,	/*  PCI mapping failure notice	*/
	IOC_E_FWRSP_ACQ_ADDR	= 13,	/*  Acquiring address		*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
	{BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
};
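
/*
 * Informative sketch, not part of the original file (the helper name
 * ioc_state_of below is hypothetical): how a table such as ioc_sm_table
 * is typically consumed. Assuming the bfa_sm_to_state() walker and the
 * bfa_sm_t cast from bfa_cs.h, the FSM's current handler function pointer
 * is looked up to recover the externally visible state enum:
 *
 *	enum bfa_ioc_state
 *	ioc_state_of(struct bfa_ioc_s *ioc)
 *	{
 *		return bfa_sm_to_state(ioc_sm_table, (bfa_sm_t)ioc->fsm);
 *	}
 */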

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*  IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*  IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*  stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*  f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*  enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*  disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*  failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*  init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*  init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*  h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*  h/w sem mapping error	*/
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*  IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/*  Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*  IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*  IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/*  IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/*  IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/*  IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/*  IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/*  IOC f/w different from driver's */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_ioc_hb_monitor(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_FWRSP_ACQ_ADDR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hb_monitor(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Acquiring address from fabric (entry function)
 */
static void
bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
{
}

/*
 *	Acquiring address from the fabric
 */
static void
bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change.  Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->fw_mismatch_notified = BFA_FALSE;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);

	/* h/w sem init */
	if (fwstate == BFI_IOC_UNINIT)
		goto sem_get;

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
		goto sem_get;

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, fwhdr.exec);
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
	writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 *  BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
	struct bfa_ioc_notify_s	*notify;
	struct list_head	*qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify_s *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return BFA_TRUE;

	return BFA_FALSE;
}
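
/*
 * Informative usage sketch, not part of the original file, mirroring the
 * pattern bfa_ioc_smem_read() and bfa_ioc_pll_init() below follow around
 * bfa_ioc_sem_get(): acquire by polling, release by reading the register
 * once and then writing 1 back:
 *
 *	if (bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == BFA_FALSE)
 *		return BFA_STATUS_FAILED;	(still busy after 3000 spins)
 *
 *	... critical section: pll init / smem access ...
 *
 *	readl(ioc->ioc_regs.ioc_init_sem_reg);
 *	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 */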

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		WARN_ON(r32 == ~0);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}
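
/*
 * (Informative) This is the event-driven counterpart of bfa_ioc_sem_get()
 * above: a read of 0 means the lock was just taken and IOCPF_E_SEMLOCKED
 * is posted; a read of 1 means another function holds it and the sem
 * timer retries; all-ones means the PCI mapping is gone and
 * IOCPF_E_SEM_ERROR is posted. The holder releases the lock the same way
 * throughout the state machines above:
 *
 *	writel(1, ioc->ioc_regs.ioc_sem_reg);
 */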

/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int		i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int		i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}

/*
 * Returns TRUE if same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}

/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (swab32(fwhdr.bootenv) != boot_env) {
		bfa_trc(ioc, fwhdr.bootenv);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_FWBOOT_TYPE_NORMAL;
	boot_env = BFI_FWBOOT_ENV_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, boot_type, boot_env);
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
	bfa_ioc_poll_fwinit(ioc);
}

static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}

static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = cpu_to_be16(ioc->clscode);
	do_gettimeofday(&tv);
	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s	attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s  *ioc = cbarg;
	u32	hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_hb_timer_start(ioc);
}
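
/*
 * (Informative) Heartbeat scheme: firmware increments the counter in
 * ioc_regs.heartbeat on every tick, and bfa_ioc_hb_check() fires every
 * BFA_IOC_HB_TOV msecs. A count that is unchanged across one interval is
 * treated as a dead IOC and bfa_ioc_recover() is started; otherwise the
 * callback doubles as the mailbox poll tick via bfa_ioc_mbox_poll() and
 * re-arms itself.
 */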

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}

/*
 *	Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;

	/*
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
			ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and device mode at the end.
	 */
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				ioc->port0_mode, ioc->port1_mode);
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
			swab32(asicmode));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
			swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
			swab32(boot_env));
}
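
/*
 * (Informative) Download loop bookkeeping, assuming the BFA_IOC_FLASH_*
 * helpers in bfa_ioc.h divide/modulo by the chunk size in words: "i"
 * counts 32-bit words of the whole image, so a fresh chunk is fetched
 * from bfa_cb_image_get_chunk() whenever i crosses a chunk boundary, and
 * fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)] selects the word within the
 * current chunk. Independently, "loff" tracks the smem page offset and
 * wraps through PSS_SMEM_PGOFF(), bumping host_page_num_fn on each wrap.
 */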


/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
	attr->card_type     = be32_to_cpu(attr->card_type);
	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
	ioc->fcmode	= (attr->port_mode == BFI_PORT_MODE_FC);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/*
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;
	u32			stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/*
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff;
	__be32 r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 *  Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
			ioc->ioc_regs.host_page_num_fn);
	/*
	 *  release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
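
/*
 * (Informative) The paged smem addressing used above: smem is windowed
 * through host_page_num_fn, so a linear offset "soff" is split by
 * PSS_SMEM_PGNUM()/PSS_SMEM_PGOFF() into a page number and an offset
 * within that page. When the per-page offset wraps back to 0 the next
 * page must be selected before the following word is read, which is why
 * the loop re-writes host_page_num_fn inside its body and restores page 0
 * at the end.
 */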

/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
	int i, len;
	u32 pgnum, loff;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 *  Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32); /* len in words */
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
			ioc->ioc_regs.host_page_num_fn);

	/*
	 *  release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}

static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);

	bfa_ioc_debug_save_ftrc(ioc);

	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
		"Heart Beat of IOC has failed\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
}

static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
}

bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	/*
	 *  Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;
	/*
	 *  release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}

/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
	} else {
		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);
	bfa_ioc_lpu_start(ioc);
}
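
/*
 * Informative usage sketch, not part of the original file: a diag-module
 * memtest boot versus the normal boot issued from bfa_ioc_hwinit():
 *
 *	bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
 *	bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, BFI_FWBOOT_ENV_OS);
 *
 * In either case the caller then waits for firmware init completion,
 * e.g. through bfa_ioc_poll_fwinit().
 */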
2038 
2039 /*
2040  * Enable/disable IOC failure auto recovery.
2041  */
2042 void
2043 bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2044 {
2045 	bfa_auto_recover = auto_recover;
2046 }
2047 
2048 
2049 
2050 bfa_boolean_t
2051 bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2052 {
2053 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2054 }
2055 
2056 bfa_boolean_t
2057 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2058 {
2059 	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
2060 
2061 	return ((r32 != BFI_IOC_UNINIT) &&
2062 		(r32 != BFI_IOC_INITING) &&
2063 		(r32 != BFI_IOC_MEMTEST));
2064 }
2065 
2066 bfa_boolean_t
2067 bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2068 {
2069 	__be32	*msgp = mbmsg;
2070 	u32	r32;
2071 	int		i;
2072 
2073 	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2074 	if ((r32 & 1) == 0)
2075 		return BFA_FALSE;
2076 
2077 	/*
2078 	 * read the MBOX msg
2079 	 */
2080 	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2081 	     i++) {
2082 		r32 = readl(ioc->ioc_regs.lpu_mbox +
2083 				   i * sizeof(u32));
2084 		msgp[i] = cpu_to_be32(r32);
2085 	}
2086 
2087 	/*
2088 	 * turn off mailbox interrupt by clearing mailbox status
2089 	 */
2090 	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2091 	readl(ioc->ioc_regs.lpu_mbox_cmd);
2092 
2093 	return BFA_TRUE;
2094 }
2095 
2096 void
2097 bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2098 {
2099 	union bfi_ioc_i2h_msg_u	*msg;
2100 	struct bfa_iocpf_s *iocpf = &ioc->iocpf;
2101 
2102 	msg = (union bfi_ioc_i2h_msg_u *) m;
2103 
2104 	bfa_ioc_stats(ioc, ioc_isrs);
2105 
2106 	switch (msg->mh.msg_id) {
2107 	case BFI_IOC_I2H_HBEAT:
2108 		break;
2109 
2110 	case BFI_IOC_I2H_ENABLE_REPLY:
2111 		ioc->port_mode = ioc->port_mode_cfg =
2112 				(enum bfa_mode_s)msg->fw_event.port_mode;
2113 		ioc->ad_cap_bm = msg->fw_event.cap_bm;
2114 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2115 		break;
2116 
2117 	case BFI_IOC_I2H_DISABLE_REPLY:
2118 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2119 		break;
2120 
2121 	case BFI_IOC_I2H_GETATTR_REPLY:
2122 		bfa_ioc_getattr_reply(ioc);
2123 		break;
2124 
2125 	case BFI_IOC_I2H_ACQ_ADDR_REPLY:
2126 		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
2127 		break;
2128 
2129 	default:
2130 		bfa_trc(ioc, msg->mh.msg_id);
2131 		WARN_ON(1);
2132 	}
2133 }
2134 
2135 /*
2136  * IOC attach time initialization and setup.
2137  *
2138  * @param[in]	ioc	memory for IOC
2139  * @param[in]	bfa	driver instance structure
2140  */
2141 void
2142 bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2143 	       struct bfa_timer_mod_s *timer_mod)
2144 {
2145 	ioc->bfa	= bfa;
2146 	ioc->cbfn	= cbfn;
2147 	ioc->timer_mod	= timer_mod;
2148 	ioc->fcmode	= BFA_FALSE;
2149 	ioc->pllinit	= BFA_FALSE;
2150 	ioc->dbg_fwsave_once = BFA_TRUE;
2151 	ioc->iocpf.ioc	= ioc;
2152 
2153 	bfa_ioc_mbox_attach(ioc);
2154 	INIT_LIST_HEAD(&ioc->notify_q);
2155 
2156 	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2157 	bfa_fsm_send_event(ioc, IOC_E_RESET);
2158 }
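
/*
 * Bring-up sketch (illustrative only, not driver code; the "my_"
 * identifiers are hypothetical). The call order follows the APIs
 * defined in this file:
 *
 *	bfa_ioc_attach(ioc, bfa, &my_ioc_cbfn, &my_timer_mod);
 *	bfa_ioc_pci_init(ioc, &my_pcidev, BFI_PCIFN_CLASS_FC);
 *	bfa_ioc_mem_claim(ioc, my_dm_kva, my_dm_pa);
 *	bfa_ioc_enable(ioc);
 */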
2159 
2160 /*
2161  * Driver detach time IOC cleanup.
2162  */
2163 void
2164 bfa_ioc_detach(struct bfa_ioc_s *ioc)
2165 {
2166 	bfa_fsm_send_event(ioc, IOC_E_DETACH);
2167 	INIT_LIST_HEAD(&ioc->notify_q);
2168 }
2169 
2170 /*
2171  * Setup IOC PCI properties.
2172  *
2173  * @param[in]	pcidev	PCI device information for this IOC
2174  */
2175 void
2176 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2177 		enum bfi_pcifn_class clscode)
2178 {
2179 	ioc->clscode	= clscode;
2180 	ioc->pcidev	= *pcidev;
2181 
2182 	/*
2183 	 * Initialize IOC and device personality
2184 	 */
2185 	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2186 	ioc->asic_mode  = BFI_ASIC_MODE_FC;
2187 
2188 	switch (pcidev->device_id) {
2189 	case BFA_PCI_DEVICE_ID_FC_8G1P:
2190 	case BFA_PCI_DEVICE_ID_FC_8G2P:
2191 		ioc->asic_gen = BFI_ASIC_GEN_CB;
2192 		ioc->fcmode = BFA_TRUE;
2193 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2194 		ioc->ad_cap_bm = BFA_CM_HBA;
2195 		break;
2196 
2197 	case BFA_PCI_DEVICE_ID_CT:
2198 		ioc->asic_gen = BFI_ASIC_GEN_CT;
2199 		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2200 		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2201 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2202 		ioc->ad_cap_bm = BFA_CM_CNA;
2203 		break;
2204 
2205 	case BFA_PCI_DEVICE_ID_CT_FC:
2206 		ioc->asic_gen = BFI_ASIC_GEN_CT;
2207 		ioc->fcmode = BFA_TRUE;
2208 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2209 		ioc->ad_cap_bm = BFA_CM_HBA;
2210 		break;
2211 
2212 	case BFA_PCI_DEVICE_ID_CT2:
2213 		ioc->asic_gen = BFI_ASIC_GEN_CT2;
2214 		if (clscode == BFI_PCIFN_CLASS_FC &&
2215 		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2216 			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2217 			ioc->fcmode = BFA_TRUE;
2218 			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2219 			ioc->ad_cap_bm = BFA_CM_HBA;
2220 		} else {
2221 			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2222 			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2223 			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2224 				ioc->port_mode =
2225 				ioc->port_mode_cfg = BFA_MODE_CNA;
2226 				ioc->ad_cap_bm = BFA_CM_CNA;
2227 			} else {
2228 				ioc->port_mode =
2229 				ioc->port_mode_cfg = BFA_MODE_NIC;
2230 				ioc->ad_cap_bm = BFA_CM_NIC;
2231 			}
2232 		}
2233 		break;
2234 
2235 	default:
2236 		WARN_ON(1);
2237 	}
2238 
2239 	/*
2240 	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2241 	 */
2242 	if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2243 		bfa_ioc_set_cb_hwif(ioc);
2244 	else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2245 		bfa_ioc_set_ct_hwif(ioc);
2246 	else {
2247 		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2248 		bfa_ioc_set_ct2_hwif(ioc);
2249 		bfa_ioc_ct2_poweron(ioc);
2250 	}
2251 
2252 	bfa_ioc_map_port(ioc);
2253 	bfa_ioc_reg_init(ioc);
2254 }
2255 
2256 /*
2257  * Initialize IOC dma memory
2258  *
2259  * @param[in]	dm_kva	kernel virtual address of IOC dma memory
2260  * @param[in]	dm_pa	physical address of IOC dma memory
2261  */
2262 void
2263 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
2264 {
2265 	/*
2266 	 * dma memory for firmware attribute
2267 	 */
2268 	ioc->attr_dma.kva = dm_kva;
2269 	ioc->attr_dma.pa = dm_pa;
2270 	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2271 }
2272 
2273 void
2274 bfa_ioc_enable(struct bfa_ioc_s *ioc)
2275 {
2276 	bfa_ioc_stats(ioc, ioc_enables);
2277 	ioc->dbg_fwsave_once = BFA_TRUE;
2278 
2279 	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2280 }
2281 
2282 void
2283 bfa_ioc_disable(struct bfa_ioc_s *ioc)
2284 {
2285 	bfa_ioc_stats(ioc, ioc_disables);
2286 	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2287 }
2288 
2289 
2290 /*
2291  * Initialize memory for saving firmware trace. The driver must
2292  * initialize trace memory before calling bfa_ioc_enable().
2293  */
2294 void
2295 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2296 {
2297 	ioc->dbg_fwsave	    = dbg_fwsave;
2298 	ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
2299 }
2300 
2301 /*
2302  * Register mailbox message handler functions
2303  *
2304  * @param[in]	ioc		IOC instance
2305  * @param[in]	mcfuncs		message class handler functions
2306  */
2307 void
2308 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2309 {
2310 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2311 	int				mc;
2312 
2313 	for (mc = 0; mc < BFI_MC_MAX; mc++)
2314 		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2315 }
2316 
2317 /*
2318  * Register mailbox message handler function, to be called by common modules
2319  */
2320 void
2321 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2322 		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2323 {
2324 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2325 
2326 	mod->mbhdlr[mc].cbfn	= cbfn;
2327 	mod->mbhdlr[mc].cbarg	= cbarg;
2328 }
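
/*
 * Usage sketch: common modules register a per-class handler at attach
 * time, as bfa_ablk_attach() and bfa_sfp_attach() do later in this
 * file ("my_" names are hypothetical):
 *
 *	bfa_ioc_mbox_regisr(ioc, BFI_MC_ABLK, my_isr, my_cbarg);
 */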
2329 
2330 /*
2331  * Queue a mailbox command request to firmware. If the mailbox is busy,
2332  * the command is queued; the caller must serialize access.
2333  *
2334  * @param[in]	ioc	IOC instance
2335  * @param[in]	cmd	Mailbox command
2336  */
2337 void
2338 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2339 {
2340 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2341 	u32			stat;
2342 
2343 	/*
2344 	 * If a previous command is pending, queue new command
2345 	 */
2346 	if (!list_empty(&mod->cmd_q)) {
2347 		list_add_tail(&cmd->qe, &mod->cmd_q);
2348 		return;
2349 	}
2350 
2351 	/*
2352 	 * If mailbox is busy, queue command for poll timer
2353 	 */
2354 	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2355 	if (stat) {
2356 		list_add_tail(&cmd->qe, &mod->cmd_q);
2357 		return;
2358 	}
2359 
2360 	/*
2361 	 * mailbox is free -- send command to firmware
2362 	 */
2363 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2364 }
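
/*
 * Usage sketch: build a host-to-IOC request in a caller-owned command
 * buffer and queue it; see bfa_ioc_send_fwsync() later in this file
 * for an in-tree example of the same pattern:
 *
 *	struct bfa_mbox_cmd_s cmd;
 *	struct bfi_ioc_ctrl_req_s *req =
 *			(struct bfi_ioc_ctrl_req_s *) cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, &cmd);
 */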
2365 
2366 /*
2367  * Handle mailbox interrupts
2368  */
2369 void
2370 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2371 {
2372 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2373 	struct bfi_mbmsg_s		m;
2374 	int				mc;
2375 
2376 	if (bfa_ioc_msgget(ioc, &m)) {
2377 		/*
2378 		 * Treat IOC message class as special.
2379 		 */
2380 		mc = m.mh.msg_class;
2381 		if (mc == BFI_MC_IOC) {
2382 			bfa_ioc_isr(ioc, &m);
2383 			return;
2384 		}
2385 
2386 		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2387 			return;
2388 
2389 		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2390 	}
2391 
2392 	bfa_ioc_lpu_read_stat(ioc);
2393 
2394 	/*
2395 	 * Try to send pending mailbox commands
2396 	 */
2397 	bfa_ioc_mbox_poll(ioc);
2398 }
2399 
2400 void
2401 bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2402 {
2403 	bfa_ioc_stats(ioc, ioc_hbfails);
2404 	ioc->stats.hb_count = ioc->hb_count;
2405 	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2406 }
2407 
2408 /*
2409  * return true if IOC is disabled
2410  */
2411 bfa_boolean_t
2412 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2413 {
2414 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2415 		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2416 }
2417 
2418 /*
2419  * Return TRUE if IOC is in acquiring address state
2420  */
2421 bfa_boolean_t
2422 bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
2423 {
2424 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
2425 }
2426 
2427 /*
2428  * return true if IOC firmware is different.
2429  */
2430 bfa_boolean_t
2431 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2432 {
2433 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2434 		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2435 		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2436 }
2437 
2438 #define bfa_ioc_state_disabled(__sm)		\
2439 	(((__sm) == BFI_IOC_UNINIT) ||		\
2440 	 ((__sm) == BFI_IOC_INITING) ||		\
2441 	 ((__sm) == BFI_IOC_HWINIT) ||		\
2442 	 ((__sm) == BFI_IOC_DISABLED) ||	\
2443 	 ((__sm) == BFI_IOC_FAIL) ||		\
2444 	 ((__sm) == BFI_IOC_CFG_DISABLED))
2445 
2446 /*
2447  * Check if adapter is disabled -- both IOCs should be in a disabled
2448  * state.
2449  */
2450 bfa_boolean_t
2451 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2452 {
2453 	u32	ioc_state;
2454 
2455 	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2456 		return BFA_FALSE;
2457 
2458 	ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
2459 	if (!bfa_ioc_state_disabled(ioc_state))
2460 		return BFA_FALSE;
2461 
2462 	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2463 		ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
2464 		if (!bfa_ioc_state_disabled(ioc_state))
2465 			return BFA_FALSE;
2466 	}
2467 
2468 	return BFA_TRUE;
2469 }
2470 
2471 /*
2472  * Reset IOC fwstate registers.
2473  */
2474 void
2475 bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2476 {
2477 	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
2478 	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
2479 }
2480 
2481 #define BFA_MFG_NAME "Brocade"
2482 void
2483 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2484 			 struct bfa_adapter_attr_s *ad_attr)
2485 {
2486 	struct bfi_ioc_attr_s	*ioc_attr;
2487 
2488 	ioc_attr = ioc->attr;
2489 
2490 	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2491 	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2492 	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2493 	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2494 	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2495 		      sizeof(struct bfa_mfg_vpd_s));
2496 
2497 	ad_attr->nports = bfa_ioc_get_nports(ioc);
2498 	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2499 
2500 	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2501 	/* For now, model descr uses same model string */
2502 	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2503 
2504 	ad_attr->card_type = ioc_attr->card_type;
2505 	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2506 
2507 	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2508 		ad_attr->prototype = 1;
2509 	else
2510 		ad_attr->prototype = 0;
2511 
2512 	ad_attr->pwwn = ioc->attr->pwwn;
2513 	ad_attr->mac  = bfa_ioc_get_mac(ioc);
2514 
2515 	ad_attr->pcie_gen = ioc_attr->pcie_gen;
2516 	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2517 	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2518 	ad_attr->asic_rev = ioc_attr->asic_rev;
2519 
2520 	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2521 
2522 	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2523 	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2524 				  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2525 }
2526 
2527 enum bfa_ioc_type_e
2528 bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2529 {
2530 	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2531 		return BFA_IOC_TYPE_LL;
2532 
2533 	WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2534 
2535 	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2536 		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2537 }
2538 
2539 void
2540 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2541 {
2542 	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2543 	memcpy((void *)serial_num,
2544 			(void *)ioc->attr->brcd_serialnum,
2545 			BFA_ADAPTER_SERIAL_NUM_LEN);
2546 }
2547 
2548 void
2549 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2550 {
2551 	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2552 	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2553 }
2554 
2555 void
2556 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2557 {
2558 	WARN_ON(!chip_rev);
2559 
2560 	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2561 
2562 	chip_rev[0] = 'R';
2563 	chip_rev[1] = 'e';
2564 	chip_rev[2] = 'v';
2565 	chip_rev[3] = '-';
2566 	chip_rev[4] = ioc->attr->asic_rev;
2567 	chip_rev[5] = '\0';
2568 }
2569 
2570 void
2571 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2572 {
2573 	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2574 	memcpy(optrom_ver, ioc->attr->optrom_version,
2575 		      BFA_VERSION_LEN);
2576 }
2577 
2578 void
2579 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2580 {
2581 	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2582 	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2583 }
2584 
2585 void
2586 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2587 {
2588 	struct bfi_ioc_attr_s	*ioc_attr;
2589 
2590 	WARN_ON(!model);
2591 	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2592 
2593 	ioc_attr = ioc->attr;
2594 
2595 	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2596 			BFA_MFG_NAME, ioc_attr->card_type);
2597 }
2598 
2599 enum bfa_ioc_state
2600 bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2601 {
2602 	enum bfa_iocpf_state iocpf_st;
2603 	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2604 
2605 	if (ioc_st == BFA_IOC_ENABLING ||
2606 		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2607 
2608 		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2609 
2610 		switch (iocpf_st) {
2611 		case BFA_IOCPF_SEMWAIT:
2612 			ioc_st = BFA_IOC_SEMWAIT;
2613 			break;
2614 
2615 		case BFA_IOCPF_HWINIT:
2616 			ioc_st = BFA_IOC_HWINIT;
2617 			break;
2618 
2619 		case BFA_IOCPF_FWMISMATCH:
2620 			ioc_st = BFA_IOC_FWMISMATCH;
2621 			break;
2622 
2623 		case BFA_IOCPF_FAIL:
2624 			ioc_st = BFA_IOC_FAIL;
2625 			break;
2626 
2627 		case BFA_IOCPF_INITFAIL:
2628 			ioc_st = BFA_IOC_INITFAIL;
2629 			break;
2630 
2631 		default:
2632 			break;
2633 		}
2634 	}
2635 
2636 	return ioc_st;
2637 }
2638 
2639 void
2640 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2641 {
2642 	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2643 
2644 	ioc_attr->state = bfa_ioc_get_state(ioc);
2645 	ioc_attr->port_id = ioc->port_id;
2646 	ioc_attr->port_mode = ioc->port_mode;
2647 	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2648 	ioc_attr->cap_bm = ioc->ad_cap_bm;
2649 
2650 	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2651 
2652 	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2653 
2654 	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2655 	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2656 	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2657 }
2658 
2659 mac_t
2660 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2661 {
2662 	/*
2663 	 * Check the IOC type and return the appropriate MAC
2664 	 */
2665 	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2666 		return ioc->attr->fcoe_mac;
2667 	else
2668 		return ioc->attr->mac;
2669 }
2670 
2671 mac_t
2672 bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2673 {
2674 	mac_t	m;
2675 
2676 	m = ioc->attr->mfg_mac;
2677 	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2678 		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2679 	else
2680 		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2681 			bfa_ioc_pcifn(ioc));
2682 
2683 	return m;
2684 }
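
/*
 * Worked example (assuming no carry across bytes): with a manufacturing
 * MAC of 00:05:1e:aa:bb:00 and PCI function 2, older card models bump
 * only the last byte, giving 00:05:1e:aa:bb:02; newer models advance
 * the three-byte suffix through bfa_mfg_increment_wwn_mac(), which
 * yields the same address here but can carry across bytes when needed.
 */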
2685 
2686 /*
2687  * Send AEN notification
2688  */
2689 void
2690 bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2691 {
2692 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2693 	struct bfa_aen_entry_s	*aen_entry;
2694 	enum bfa_ioc_type_e ioc_type;
2695 
2696 	bfad_get_aen_entry(bfad, aen_entry);
2697 	if (!aen_entry)
2698 		return;
2699 
2700 	ioc_type = bfa_ioc_get_type(ioc);
2701 	switch (ioc_type) {
2702 	case BFA_IOC_TYPE_FC:
2703 		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2704 		break;
2705 	case BFA_IOC_TYPE_FCoE:
2706 		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2707 		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2708 		break;
2709 	case BFA_IOC_TYPE_LL:
2710 		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2711 		break;
2712 	default:
2713 		WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2714 		break;
2715 	}
2716 
2717 	/* Send the AEN notification */
2718 	aen_entry->aen_data.ioc.ioc_type = ioc_type;
2719 	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2720 				  BFA_AEN_CAT_IOC, event);
2721 }
2722 
2723 /*
2724  * Retrieve saved firmware trace from a prior IOC failure.
2725  */
2726 bfa_status_t
2727 bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2728 {
2729 	int	tlen;
2730 
2731 	if (ioc->dbg_fwsave_len == 0)
2732 		return BFA_STATUS_ENOFSAVE;
2733 
2734 	tlen = *trclen;
2735 	if (tlen > ioc->dbg_fwsave_len)
2736 		tlen = ioc->dbg_fwsave_len;
2737 
2738 	memcpy(trcdata, ioc->dbg_fwsave, tlen);
2739 	*trclen = tlen;
2740 	return BFA_STATUS_OK;
2741 }
2742 
2743 
2744 /*
2745  * Retrieve the current firmware trace from IOC smem.
2746  */
2747 bfa_status_t
2748 bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2749 {
2750 	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2751 	int tlen;
2752 	bfa_status_t status;
2753 
2754 	bfa_trc(ioc, *trclen);
2755 
2756 	tlen = *trclen;
2757 	if (tlen > BFA_DBG_FWTRC_LEN)
2758 		tlen = BFA_DBG_FWTRC_LEN;
2759 
2760 	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2761 	*trclen = tlen;
2762 	return status;
2763 }
2764 
2765 static void
2766 bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2767 {
2768 	struct bfa_mbox_cmd_s cmd;
2769 	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2770 
2771 	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2772 		    bfa_ioc_portid(ioc));
2773 	req->clscode = cpu_to_be16(ioc->clscode);
2774 	bfa_ioc_mbox_queue(ioc, &cmd);
2775 }
2776 
2777 static void
2778 bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2779 {
2780 	u32 fwsync_iter = 1000;
2781 
2782 	bfa_ioc_send_fwsync(ioc);
2783 
2784 	/*
2785 	 * After sending a fw sync mbox command wait for it to
2786 	 * take effect.  We will not wait for a response because
2787 	 *    1. fw_sync mbox cmd doesn't have a response.
2788 	 *    2. Even if we implement that, interrupts might not
2789 	 *	 be enabled when we call this function.
2790 	 * So, just keep checking if any mbox cmd is pending, and
2791 	 * after waiting for a reasonable amount of time, go ahead.
2792 	 * It is possible that fw has crashed and the mbox command
2793 	 * is never acknowledged.
2794 	 */
2795 	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2796 		fwsync_iter--;
2797 }
2798 
2799 /*
2800  * Dump firmware smem
2801  */
2802 bfa_status_t
2803 bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2804 				u32 *offset, int *buflen)
2805 {
2806 	u32 loff;
2807 	int dlen;
2808 	bfa_status_t status;
2809 	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
2810 
2811 	if (*offset >= smem_len) {
2812 		*offset = *buflen = 0;
2813 		return BFA_STATUS_EINVAL;
2814 	}
2815 
2816 	loff = *offset;
2817 	dlen = *buflen;
2818 
2819 	/*
2820 	 * First smem read, sync smem before proceeding
2821 	 * No need to sync before reading every chunk.
2822 	 */
2823 	if (loff == 0)
2824 		bfa_ioc_fwsync(ioc);
2825 
2826 	if ((loff + dlen) >= smem_len)
2827 		dlen = smem_len - loff;
2828 
2829 	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2830 
2831 	if (status != BFA_STATUS_OK) {
2832 		*offset = *buflen = 0;
2833 		return status;
2834 	}
2835 
2836 	*offset += dlen;
2837 
2838 	if (*offset >= smem_len)
2839 		*offset = 0;
2840 
2841 	*buflen = dlen;
2842 
2843 	return status;
2844 }
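
/*
 * Usage sketch: callers pull the full smem dump in chunks, feeding the
 * offset cursor back in until it wraps to zero (buffer handling below
 * is hypothetical):
 *
 *	u32 off = 0;
 *	int len;
 *
 *	do {
 *		len = my_chunk_size;
 *		if (bfa_ioc_debug_fwcore(ioc, my_buf, &off, &len) !=
 *		    BFA_STATUS_OK)
 *			break;
 *		my_consume(my_buf, len);
 *	} while (off != 0);
 */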
2845 
2846 /*
2847  * Firmware statistics
2848  */
2849 bfa_status_t
2850 bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2851 {
2852 	u32 loff = BFI_IOC_FWSTATS_OFF +
2853 		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2854 	int tlen;
2855 	bfa_status_t status;
2856 
2857 	if (ioc->stats_busy) {
2858 		bfa_trc(ioc, ioc->stats_busy);
2859 		return BFA_STATUS_DEVBUSY;
2860 	}
2861 	ioc->stats_busy = BFA_TRUE;
2862 
2863 	tlen = sizeof(struct bfa_fw_stats_s);
2864 	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2865 
2866 	ioc->stats_busy = BFA_FALSE;
2867 	return status;
2868 }
2869 
2870 bfa_status_t
2871 bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2872 {
2873 	u32 loff = BFI_IOC_FWSTATS_OFF +
2874 		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2875 	int tlen;
2876 	bfa_status_t status;
2877 
2878 	if (ioc->stats_busy) {
2879 		bfa_trc(ioc, ioc->stats_busy);
2880 		return BFA_STATUS_DEVBUSY;
2881 	}
2882 	ioc->stats_busy = BFA_TRUE;
2883 
2884 	tlen = sizeof(struct bfa_fw_stats_s);
2885 	status = bfa_ioc_smem_clr(ioc, loff, tlen);
2886 
2887 	ioc->stats_busy = BFA_FALSE;
2888 	return status;
2889 }
2890 
2891 /*
2892  * Save firmware trace if configured.
2893  */
2894 static void
2895 bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
2896 {
2897 	int		tlen;
2898 
2899 	if (ioc->dbg_fwsave_once) {
2900 		ioc->dbg_fwsave_once = BFA_FALSE;
2901 		if (ioc->dbg_fwsave_len) {
2902 			tlen = ioc->dbg_fwsave_len;
2903 			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2904 		}
2905 	}
2906 }
2907 
2908 /*
2909  * Firmware failure detected. Start recovery actions.
2910  */
2911 static void
2912 bfa_ioc_recover(struct bfa_ioc_s *ioc)
2913 {
2914 	bfa_ioc_stats(ioc, ioc_hbfails);
2915 	ioc->stats.hb_count = ioc->hb_count;
2916 	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2917 }
2918 
2919 static void
2920 bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
2921 {
2922 	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2923 		return;
2924 	if (ioc->attr->nwwn == 0)
2925 		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
2926 	if (ioc->attr->pwwn == 0)
2927 		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
2928 }
2929 
2930 /*
2931  *  BFA IOC PF private functions
2932  */
2933 static void
2934 bfa_iocpf_timeout(void *ioc_arg)
2935 {
2936 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
2937 
2938 	bfa_trc(ioc, 0);
2939 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2940 }
2941 
2942 static void
2943 bfa_iocpf_sem_timeout(void *ioc_arg)
2944 {
2945 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
2946 
2947 	bfa_ioc_hw_sem_get(ioc);
2948 }
2949 
2950 static void
2951 bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
2952 {
2953 	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2954 
2955 	bfa_trc(ioc, fwstate);
2956 
2957 	if (fwstate == BFI_IOC_DISABLED) {
2958 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2959 		return;
2960 	}
2961 
2962 	if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
2963 		bfa_iocpf_timeout(ioc);
2964 	else {
2965 		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
2966 		bfa_iocpf_poll_timer_start(ioc);
2967 	}
2968 }
2969 
2970 static void
2971 bfa_iocpf_poll_timeout(void *ioc_arg)
2972 {
2973 	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2974 
2975 	bfa_ioc_poll_fwinit(ioc);
2976 }
2977 
2978 /*
2979  *  bfa timer function
2980  */
2981 void
2982 bfa_timer_beat(struct bfa_timer_mod_s *mod)
2983 {
2984 	struct list_head *qh = &mod->timer_q;
2985 	struct list_head *qe, *qe_next;
2986 	struct bfa_timer_s *elem;
2987 	struct list_head timedout_q;
2988 
2989 	INIT_LIST_HEAD(&timedout_q);
2990 
2991 	qe = bfa_q_next(qh);
2992 
2993 	while (qe != qh) {
2994 		qe_next = bfa_q_next(qe);
2995 
2996 		elem = (struct bfa_timer_s *) qe;
2997 		if (elem->timeout <= BFA_TIMER_FREQ) {
2998 			elem->timeout = 0;
2999 			list_del(&elem->qe);
3000 			list_add_tail(&elem->qe, &timedout_q);
3001 		} else {
3002 			elem->timeout -= BFA_TIMER_FREQ;
3003 		}
3004 
3005 		qe = qe_next;	/* go to next elem */
3006 	}
3007 
3008 	/*
3009 	 * Pop all the timeout entries
3010 	 */
3011 	while (!list_empty(&timedout_q)) {
3012 		bfa_q_deq(&timedout_q, &elem);
3013 		elem->timercb(elem->arg);
3014 	}
3015 }
3016 
3017 /*
3018  * Should be called with lock protection
3019  */
3020 void
3021 bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
3022 		    void (*timercb) (void *), void *arg, unsigned int timeout)
3023 {
3024 
3025 	WARN_ON(timercb == NULL);
3026 	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
3027 
3028 	timer->timeout = timeout;
3029 	timer->timercb = timercb;
3030 	timer->arg = arg;
3031 
3032 	list_add_tail(&timer->qe, &mod->timer_q);
3033 }
3034 
3035 /*
3036  * Should be called with lock protection
3037  */
3038 void
3039 bfa_timer_stop(struct bfa_timer_s *timer)
3040 {
3041 	WARN_ON(list_empty(&timer->qe));
3042 
3043 	list_del(&timer->qe);
3044 }
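
/*
 * Usage sketch: timeouts are decremented by BFA_TIMER_FREQ on every
 * bfa_timer_beat() pass, so they are expressed in the same millisecond
 * units ("my_" names are hypothetical):
 *
 *	bfa_timer_begin(&my_timer_mod, &my_timer, my_timeout_cb,
 *			my_arg, 2 * BFA_TIMER_FREQ);
 *	...
 *	bfa_timer_stop(&my_timer);	(only if it has not yet fired)
 */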
3045 
3046 /*
3047  *	ASIC block related
3048  */
3049 static void
3050 bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3051 {
3052 	struct bfa_ablk_cfg_inst_s *cfg_inst;
3053 	int i, j;
3054 	u16	be16;
3055 	u32	be32;
3056 
3057 	for (i = 0; i < BFA_ABLK_MAX; i++) {
3058 		cfg_inst = &cfg->inst[i];
3059 		for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3060 			be16 = cfg_inst->pf_cfg[j].pers;
3061 			cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3062 			be16 = cfg_inst->pf_cfg[j].num_qpairs;
3063 			cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3064 			be16 = cfg_inst->pf_cfg[j].num_vectors;
3065 			cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3066 			be32 = cfg_inst->pf_cfg[j].bw;
3067 			cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
3068 		}
3069 	}
3070 }
3071 
3072 static void
3073 bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3074 {
3075 	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3076 	struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3077 	bfa_ablk_cbfn_t cbfn;
3078 
3079 	WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3080 	bfa_trc(ablk->ioc, msg->mh.msg_id);
3081 
3082 	switch (msg->mh.msg_id) {
3083 	case BFI_ABLK_I2H_QUERY:
3084 		if (rsp->status == BFA_STATUS_OK) {
3085 			memcpy(ablk->cfg, ablk->dma_addr.kva,
3086 				sizeof(struct bfa_ablk_cfg_s));
3087 			bfa_ablk_config_swap(ablk->cfg);
3088 			ablk->cfg = NULL;
3089 		}
3090 		break;
3091 
3092 	case BFI_ABLK_I2H_ADPT_CONFIG:
3093 	case BFI_ABLK_I2H_PORT_CONFIG:
3094 		/* update config port mode */
3095 		ablk->ioc->port_mode_cfg = rsp->port_mode;
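		/* fall through */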
3096 
3097 	case BFI_ABLK_I2H_PF_DELETE:
3098 	case BFI_ABLK_I2H_PF_UPDATE:
3099 	case BFI_ABLK_I2H_OPTROM_ENABLE:
3100 	case BFI_ABLK_I2H_OPTROM_DISABLE:
3101 		/* No-op */
3102 		break;
3103 
3104 	case BFI_ABLK_I2H_PF_CREATE:
3105 		*(ablk->pcifn) = rsp->pcifn;
3106 		ablk->pcifn = NULL;
3107 		break;
3108 
3109 	default:
3110 		WARN_ON(1);
3111 	}
3112 
3113 	ablk->busy = BFA_FALSE;
3114 	if (ablk->cbfn) {
3115 		cbfn = ablk->cbfn;
3116 		ablk->cbfn = NULL;
3117 		cbfn(ablk->cbarg, rsp->status);
3118 	}
3119 }
3120 
3121 static void
3122 bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3123 {
3124 	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3125 
3126 	bfa_trc(ablk->ioc, event);
3127 
3128 	switch (event) {
3129 	case BFA_IOC_E_ENABLED:
3130 		WARN_ON(ablk->busy != BFA_FALSE);
3131 		break;
3132 
3133 	case BFA_IOC_E_DISABLED:
3134 	case BFA_IOC_E_FAILED:
3135 		/* Fail any pending requests */
3136 		ablk->pcifn = NULL;
3137 		if (ablk->busy) {
3138 			if (ablk->cbfn)
3139 				ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3140 			ablk->cbfn = NULL;
3141 			ablk->busy = BFA_FALSE;
3142 		}
3143 		break;
3144 
3145 	default:
3146 		WARN_ON(1);
3147 		break;
3148 	}
3149 }
3150 
3151 u32
3152 bfa_ablk_meminfo(void)
3153 {
3154 	return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3155 }
3156 
3157 void
3158 bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3159 {
3160 	ablk->dma_addr.kva = dma_kva;
3161 	ablk->dma_addr.pa  = dma_pa;
3162 }
3163 
3164 void
3165 bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3166 {
3167 	ablk->ioc = ioc;
3168 
3169 	bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3170 	bfa_q_qe_init(&ablk->ioc_notify);
3171 	bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3172 	list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3173 }
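
/*
 * Setup sketch: size the DMA area with bfa_ablk_meminfo(), attach the
 * module, then claim the memory (allocating the DMA-coherent buffer
 * itself is the caller's job and is hypothetical here):
 *
 *	u32 len = bfa_ablk_meminfo();
 *	bfa_ablk_attach(ablk, ioc);
 *	bfa_ablk_memclaim(ablk, my_dma_kva, my_dma_pa);
 */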
3174 
3175 bfa_status_t
3176 bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3177 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3178 {
3179 	struct bfi_ablk_h2i_query_s *m;
3180 
3181 	WARN_ON(!ablk_cfg);
3182 
3183 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3184 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3185 		return BFA_STATUS_IOC_FAILURE;
3186 	}
3187 
3188 	if (ablk->busy) {
3189 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3190 		return  BFA_STATUS_DEVBUSY;
3191 	}
3192 
3193 	ablk->cfg = ablk_cfg;
3194 	ablk->cbfn  = cbfn;
3195 	ablk->cbarg = cbarg;
3196 	ablk->busy  = BFA_TRUE;
3197 
3198 	m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3199 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3200 		    bfa_ioc_portid(ablk->ioc));
3201 	bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3202 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3203 
3204 	return BFA_STATUS_OK;
3205 }
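
/*
 * Usage sketch: every ablk request completes asynchronously through
 * the supplied callback, and only one request may be in flight at a
 * time ("my_" names are hypothetical):
 *
 *	bfa_status_t st;
 *
 *	st = bfa_ablk_query(ablk, &my_cfg, my_done_cb, my_arg);
 *	if (st == BFA_STATUS_DEVBUSY)
 *		... retry after the outstanding request completes ...
 */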
3206 
3207 bfa_status_t
3208 bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3209 		u8 port, enum bfi_pcifn_class personality, int bw,
3210 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3211 {
3212 	struct bfi_ablk_h2i_pf_req_s *m;
3213 
3214 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3215 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3216 		return BFA_STATUS_IOC_FAILURE;
3217 	}
3218 
3219 	if (ablk->busy) {
3220 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3221 		return  BFA_STATUS_DEVBUSY;
3222 	}
3223 
3224 	ablk->pcifn = pcifn;
3225 	ablk->cbfn = cbfn;
3226 	ablk->cbarg = cbarg;
3227 	ablk->busy  = BFA_TRUE;
3228 
3229 	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3230 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3231 		    bfa_ioc_portid(ablk->ioc));
3232 	m->pers = cpu_to_be16((u16)personality);
3233 	m->bw = cpu_to_be32(bw);
3234 	m->port = port;
3235 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3236 
3237 	return BFA_STATUS_OK;
3238 }
3239 
3240 bfa_status_t
3241 bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3242 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3243 {
3244 	struct bfi_ablk_h2i_pf_req_s *m;
3245 
3246 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3247 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3248 		return BFA_STATUS_IOC_FAILURE;
3249 	}
3250 
3251 	if (ablk->busy) {
3252 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3253 		return  BFA_STATUS_DEVBUSY;
3254 	}
3255 
3256 	ablk->cbfn  = cbfn;
3257 	ablk->cbarg = cbarg;
3258 	ablk->busy  = BFA_TRUE;
3259 
3260 	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3261 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3262 		    bfa_ioc_portid(ablk->ioc));
3263 	m->pcifn = (u8)pcifn;
3264 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3265 
3266 	return BFA_STATUS_OK;
3267 }
3268 
3269 bfa_status_t
3270 bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3271 		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3272 {
3273 	struct bfi_ablk_h2i_cfg_req_s *m;
3274 
3275 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3276 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3277 		return BFA_STATUS_IOC_FAILURE;
3278 	}
3279 
3280 	if (ablk->busy) {
3281 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3282 		return  BFA_STATUS_DEVBUSY;
3283 	}
3284 
3285 	ablk->cbfn  = cbfn;
3286 	ablk->cbarg = cbarg;
3287 	ablk->busy  = BFA_TRUE;
3288 
3289 	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3290 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3291 		    bfa_ioc_portid(ablk->ioc));
3292 	m->mode = (u8)mode;
3293 	m->max_pf = (u8)max_pf;
3294 	m->max_vf = (u8)max_vf;
3295 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3296 
3297 	return BFA_STATUS_OK;
3298 }
3299 
3300 bfa_status_t
3301 bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3302 		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3303 {
3304 	struct bfi_ablk_h2i_cfg_req_s *m;
3305 
3306 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3307 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3308 		return BFA_STATUS_IOC_FAILURE;
3309 	}
3310 
3311 	if (ablk->busy) {
3312 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3313 		return  BFA_STATUS_DEVBUSY;
3314 	}
3315 
3316 	ablk->cbfn  = cbfn;
3317 	ablk->cbarg = cbarg;
3318 	ablk->busy  = BFA_TRUE;
3319 
3320 	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3321 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3322 		bfa_ioc_portid(ablk->ioc));
3323 	m->port = (u8)port;
3324 	m->mode = (u8)mode;
3325 	m->max_pf = (u8)max_pf;
3326 	m->max_vf = (u8)max_vf;
3327 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3328 
3329 	return BFA_STATUS_OK;
3330 }
3331 
3332 bfa_status_t
3333 bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
3334 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3335 {
3336 	struct bfi_ablk_h2i_pf_req_s *m;
3337 
3338 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3339 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3340 		return BFA_STATUS_IOC_FAILURE;
3341 	}
3342 
3343 	if (ablk->busy) {
3344 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3345 		return  BFA_STATUS_DEVBUSY;
3346 	}
3347 
3348 	ablk->cbfn  = cbfn;
3349 	ablk->cbarg = cbarg;
3350 	ablk->busy  = BFA_TRUE;
3351 
3352 	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3353 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3354 		bfa_ioc_portid(ablk->ioc));
3355 	m->pcifn = (u8)pcifn;
3356 	m->bw = cpu_to_be32(bw);
3357 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3358 
3359 	return BFA_STATUS_OK;
3360 }
3361 
3362 bfa_status_t
3363 bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3364 {
3365 	struct bfi_ablk_h2i_optrom_s *m;
3366 
3367 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3368 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3369 		return BFA_STATUS_IOC_FAILURE;
3370 	}
3371 
3372 	if (ablk->busy) {
3373 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3374 		return  BFA_STATUS_DEVBUSY;
3375 	}
3376 
3377 	ablk->cbfn  = cbfn;
3378 	ablk->cbarg = cbarg;
3379 	ablk->busy  = BFA_TRUE;
3380 
3381 	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3382 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3383 		bfa_ioc_portid(ablk->ioc));
3384 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3385 
3386 	return BFA_STATUS_OK;
3387 }
3388 
3389 bfa_status_t
3390 bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3391 {
3392 	struct bfi_ablk_h2i_optrom_s *m;
3393 
3394 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3395 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3396 		return BFA_STATUS_IOC_FAILURE;
3397 	}
3398 
3399 	if (ablk->busy) {
3400 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3401 		return  BFA_STATUS_DEVBUSY;
3402 	}
3403 
3404 	ablk->cbfn  = cbfn;
3405 	ablk->cbarg = cbarg;
3406 	ablk->busy  = BFA_TRUE;
3407 
3408 	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3409 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3410 		bfa_ioc_portid(ablk->ioc));
3411 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3412 
3413 	return BFA_STATUS_OK;
3414 }
3415 
3416 /*
3417  *	SFP module specific
3418  */
3419 
3420 /* forward declarations */
3421 static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3422 static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3423 static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3424 				enum bfa_port_speed portspeed);
3425 
3426 static void
3427 bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3428 {
3429 	bfa_trc(sfp, sfp->lock);
3430 	if (sfp->cbfn)
3431 		sfp->cbfn(sfp->cbarg, sfp->status);
3432 	sfp->lock = 0;
3433 	sfp->cbfn = NULL;
3434 }
3435 
3436 static void
3437 bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3438 {
3439 	bfa_trc(sfp, sfp->portspeed);
3440 	if (sfp->media) {
3441 		bfa_sfp_media_get(sfp);
3442 		if (sfp->state_query_cbfn)
3443 			sfp->state_query_cbfn(sfp->state_query_cbarg,
3444 					sfp->status);
3445 		sfp->media = NULL;
3446 	}
3447 
3448 	if (sfp->portspeed) {
3449 		sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3450 		if (sfp->state_query_cbfn)
3451 			sfp->state_query_cbfn(sfp->state_query_cbarg,
3452 					sfp->status);
3453 		sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3454 	}
3455 
3456 	sfp->state_query_lock = 0;
3457 	sfp->state_query_cbfn = NULL;
3458 }
3459 
3460 /*
3461  *	IOC event handler.
3462  */
3463 static void
3464 bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3465 {
3466 	struct bfa_sfp_s *sfp = sfp_arg;
3467 
3468 	bfa_trc(sfp, event);
3469 	bfa_trc(sfp, sfp->lock);
3470 	bfa_trc(sfp, sfp->state_query_lock);
3471 
3472 	switch (event) {
3473 	case BFA_IOC_E_DISABLED:
3474 	case BFA_IOC_E_FAILED:
3475 		if (sfp->lock) {
3476 			sfp->status = BFA_STATUS_IOC_FAILURE;
3477 			bfa_cb_sfp_show(sfp);
3478 		}
3479 
3480 		if (sfp->state_query_lock) {
3481 			sfp->status = BFA_STATUS_IOC_FAILURE;
3482 			bfa_cb_sfp_state_query(sfp);
3483 		}
3484 		break;
3485 
3486 	default:
3487 		break;
3488 	}
3489 }
3490 
3491 /*
3492  * SFP's State Change Notification post to AEN
3493  */
3494 static void
3495 bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3496 {
3497 	struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3498 	struct bfa_aen_entry_s  *aen_entry;
3499 	enum bfa_port_aen_event aen_evt = 0;
3500 
3501 	bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3502 		      ((u64)rsp->event));
3503 
3504 	bfad_get_aen_entry(bfad, aen_entry);
3505 	if (!aen_entry)
3506 		return;
3507 
3508 	aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3509 	aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3510 	aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3511 
3512 	switch (rsp->event) {
3513 	case BFA_SFP_SCN_INSERTED:
3514 		aen_evt = BFA_PORT_AEN_SFP_INSERT;
3515 		break;
3516 	case BFA_SFP_SCN_REMOVED:
3517 		aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3518 		break;
3519 	case BFA_SFP_SCN_FAILED:
3520 		aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3521 		break;
3522 	case BFA_SFP_SCN_UNSUPPORT:
3523 		aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3524 		break;
3525 	case BFA_SFP_SCN_POM:
3526 		aen_evt = BFA_PORT_AEN_SFP_POM;
3527 		aen_entry->aen_data.port.level = rsp->pomlvl;
3528 		break;
3529 	default:
3530 		bfa_trc(sfp, rsp->event);
3531 		WARN_ON(1);
3532 	}
3533 
3534 	/* Send the AEN notification */
3535 	bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3536 				  BFA_AEN_CAT_PORT, aen_evt);
3537 }
3538 
3539 /*
3540  *	SFP get data send
3541  */
3542 static void
3543 bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3544 {
3545 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3546 
3547 	bfa_trc(sfp, req->memtype);
3548 
3549 	/* build host command */
3550 	bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3551 			bfa_ioc_portid(sfp->ioc));
3552 
3553 	/* send mbox cmd */
3554 	bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3555 }
3556 
3557 /*
3558  *	SFP is valid, read sfp data
3559  */
3560 static void
3561 bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3562 {
3563 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3564 
3565 	WARN_ON(sfp->lock != 0);
3566 	bfa_trc(sfp, sfp->state);
3567 
3568 	sfp->lock = 1;
3569 	sfp->memtype = memtype;
3570 	req->memtype = memtype;
3571 
3572 	/* Setup SG list */
3573 	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3574 
3575 	bfa_sfp_getdata_send(sfp);
3576 }
3577 
3578 /*
3579  *	SFP scn handler
3580  */
3581 static void
3582 bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3583 {
3584 	struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3585 
3586 	switch (rsp->event) {
3587 	case BFA_SFP_SCN_INSERTED:
3588 		sfp->state = BFA_SFP_STATE_INSERTED;
3589 		sfp->data_valid = 0;
3590 		bfa_sfp_scn_aen_post(sfp, rsp);
3591 		break;
3592 	case BFA_SFP_SCN_REMOVED:
3593 		sfp->state = BFA_SFP_STATE_REMOVED;
3594 		sfp->data_valid = 0;
3595 		bfa_sfp_scn_aen_post(sfp, rsp);
3596 		break;
3597 	case BFA_SFP_SCN_FAILED:
3598 		sfp->state = BFA_SFP_STATE_FAILED;
3599 		sfp->data_valid = 0;
3600 		bfa_sfp_scn_aen_post(sfp, rsp);
3601 		break;
3602 	case BFA_SFP_SCN_UNSUPPORT:
3603 		sfp->state = BFA_SFP_STATE_UNSUPPORT;
3604 		bfa_sfp_scn_aen_post(sfp, rsp);
3605 		if (!sfp->lock)
3606 			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3607 		break;
3608 	case BFA_SFP_SCN_POM:
3609 		bfa_sfp_scn_aen_post(sfp, rsp);
3610 		break;
3611 	case BFA_SFP_SCN_VALID:
3612 		sfp->state = BFA_SFP_STATE_VALID;
3613 		if (!sfp->lock)
3614 			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3615 		break;
3616 	default:
3617 		bfa_trc(sfp, rsp->event);
3618 		WARN_ON(1);
3619 	}
3620 }
3621 
3622 /*
3623  * SFP show complete
3624  */
3625 static void
3626 bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3627 {
3628 	struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3629 
3630 	if (!sfp->lock) {
3631 		/*
3632 		 * receiving response after ioc failure
3633 		 */
3634 		bfa_trc(sfp, sfp->lock);
3635 		return;
3636 	}
3637 
3638 	bfa_trc(sfp, rsp->status);
3639 	if (rsp->status == BFA_STATUS_OK) {
3640 		sfp->data_valid = 1;
3641 		if (sfp->state == BFA_SFP_STATE_VALID)
3642 			sfp->status = BFA_STATUS_OK;
3643 		else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3644 			sfp->status = BFA_STATUS_SFP_UNSUPP;
3645 		else
3646 			bfa_trc(sfp, sfp->state);
3647 	} else {
3648 		sfp->data_valid = 0;
3649 		sfp->status = rsp->status;
3650 		/* sfpshow shouldn't change sfp state */
3651 	}
3652 
3653 	bfa_trc(sfp, sfp->memtype);
3654 	if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3655 		bfa_trc(sfp, sfp->data_valid);
3656 		if (sfp->data_valid) {
3657 			u32	size = sizeof(struct sfp_mem_s);
3658 			u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
3659 			memcpy(des, sfp->dbuf_kva, size);
3660 		}
3661 		/*
3662 		 * Queue completion callback.
3663 		 */
3664 		bfa_cb_sfp_show(sfp);
3665 	} else
3666 		sfp->lock = 0;
3667 
3668 	bfa_trc(sfp, sfp->state_query_lock);
3669 	if (sfp->state_query_lock) {
3670 		sfp->state = rsp->state;
3671 		/* Complete callback */
3672 		bfa_cb_sfp_state_query(sfp);
3673 	}
3674 }
3675 
3676 /*
3677  *	SFP query fw sfp state
3678  */
3679 static void
3680 bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3681 {
3682 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3683 
3684 	/* Should not be doing query if not in _INIT state */
3685 	WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3686 	WARN_ON(sfp->state_query_lock != 0);
3687 	bfa_trc(sfp, sfp->state);
3688 
3689 	sfp->state_query_lock = 1;
3690 	req->memtype = 0;
3691 
3692 	if (!sfp->lock)
3693 		bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3694 }
3695 
3696 static void
3697 bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3698 {
3699 	enum bfa_defs_sfp_media_e *media = sfp->media;
3700 
3701 	*media = BFA_SFP_MEDIA_UNKNOWN;
3702 
3703 	if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3704 		*media = BFA_SFP_MEDIA_UNSUPPORT;
3705 	else if (sfp->state == BFA_SFP_STATE_VALID) {
3706 		union sfp_xcvr_e10g_code_u e10g;
3707 		struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3708 		u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3709 				(sfpmem->srlid_base.xcvr[5] >> 1);
3710 
3711 		e10g.b = sfpmem->srlid_base.xcvr[0];
3712 		bfa_trc(sfp, e10g.b);
3713 		bfa_trc(sfp, xmtr_tech);
3714 		/* check fc transmitter tech */
3715 		if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3716 		    (xmtr_tech & SFP_XMTR_TECH_CP) ||
3717 		    (xmtr_tech & SFP_XMTR_TECH_CA))
3718 			*media = BFA_SFP_MEDIA_CU;
3719 		else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3720 			 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3721 			*media = BFA_SFP_MEDIA_EL;
3722 		else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3723 			 (xmtr_tech & SFP_XMTR_TECH_LC))
3724 			*media = BFA_SFP_MEDIA_LW;
3725 		else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3726 			 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3727 			 (xmtr_tech & SFP_XMTR_TECH_SA))
3728 			*media = BFA_SFP_MEDIA_SW;
3729 		/* Check 10G Ethernet Compliance code */
3730 		else if (e10g.r.e10g_sr)
3731 			*media = BFA_SFP_MEDIA_SW;
3732 		else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3733 			*media = BFA_SFP_MEDIA_LW;
3734 		else if (e10g.r.e10g_unall)
3735 			*media = BFA_SFP_MEDIA_UNKNOWN;
3736 		else
3737 			bfa_trc(sfp, 0);
3738 	} else
3739 		bfa_trc(sfp, sfp->state);
3740 }
3741 
3742 static bfa_status_t
3743 bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3744 {
3745 	struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3746 	struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3747 	union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3748 	union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3749 
3750 	if (portspeed == BFA_PORT_SPEED_10GBPS) {
3751 		if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3752 			return BFA_STATUS_OK;
3753 		else {
3754 			bfa_trc(sfp, e10g.b);
3755 			return BFA_STATUS_UNSUPP_SPEED;
3756 		}
3757 	}
3758 	if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3759 	    ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3760 	    ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3761 	    ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3762 	    ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3763 		return BFA_STATUS_OK;
3764 	else {
3765 		bfa_trc(sfp, portspeed);
3766 		bfa_trc(sfp, fc3.b);
3767 		bfa_trc(sfp, e10g.b);
3768 		return BFA_STATUS_UNSUPP_SPEED;
3769 	}
3770 }
3771 
3772 /*
3773  *	SFP hmbox handler
3774  */
3775 void
3776 bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3777 {
3778 	struct bfa_sfp_s *sfp = sfparg;
3779 
3780 	switch (msg->mh.msg_id) {
3781 	case BFI_SFP_I2H_SHOW:
3782 		bfa_sfp_show_comp(sfp, msg);
3783 		break;
3784 
3785 	case BFI_SFP_I2H_SCN:
3786 		bfa_sfp_scn(sfp, msg);
3787 		break;
3788 
3789 	default:
3790 		bfa_trc(sfp, msg->mh.msg_id);
3791 		WARN_ON(1);
3792 	}
3793 }
3794 
3795 /*
3796  *	Return DMA memory needed by sfp module.
3797  */
3798 u32
3799 bfa_sfp_meminfo(void)
3800 {
3801 	return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3802 }
3803 
3804 /*
3805  *	Attach virtual and physical memory for SFP.
3806  */
3807 void
3808 bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
3809 		struct bfa_trc_mod_s *trcmod)
3810 {
3811 	sfp->dev = dev;
3812 	sfp->ioc = ioc;
3813 	sfp->trcmod = trcmod;
3814 
3815 	sfp->cbfn = NULL;
3816 	sfp->cbarg = NULL;
3817 	sfp->sfpmem = NULL;
3818 	sfp->lock = 0;
3819 	sfp->data_valid = 0;
3820 	sfp->state = BFA_SFP_STATE_INIT;
3821 	sfp->state_query_lock = 0;
3822 	sfp->state_query_cbfn = NULL;
3823 	sfp->state_query_cbarg = NULL;
3824 	sfp->media = NULL;
3825 	sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3826 	sfp->is_elb = BFA_FALSE;
3827 
3828 	bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
3829 	bfa_q_qe_init(&sfp->ioc_notify);
3830 	bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
3831 	list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
3832 }
3833 
3834 /*
3835  *	Claim Memory for SFP
3836  */
3837 void
3838 bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
3839 {
3840 	sfp->dbuf_kva   = dm_kva;
3841 	sfp->dbuf_pa    = dm_pa;
3842 	memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
3843 
3844 	dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3845 	dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3846 }
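
/*
 * Setup sketch, mirroring the ablk module above ("my_" names are
 * hypothetical; allocation of the DMA area is the caller's job):
 *
 *	u32 len = bfa_sfp_meminfo();
 *	bfa_sfp_attach(sfp, ioc, my_dev, my_trcmod);
 *	bfa_sfp_memclaim(sfp, my_dm_kva, my_dm_pa);
 */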
3847 
3848 /*
3849  * Show SFP eeprom content
3850  *
3851  * @param[in] sfp   - bfa sfp module
3852  *
3853  * @param[out] sfpmem - sfp eeprom data
3854  *
3855  */
3856 bfa_status_t
3857 bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
3858 		bfa_cb_sfp_t cbfn, void *cbarg)
3859 {
3860 
3861 	if (!bfa_ioc_is_operational(sfp->ioc)) {
3862 		bfa_trc(sfp, 0);
3863 		return BFA_STATUS_IOC_NON_OP;
3864 	}
3865 
3866 	if (sfp->lock) {
3867 		bfa_trc(sfp, 0);
3868 		return BFA_STATUS_DEVBUSY;
3869 	}
3870 
3871 	sfp->cbfn = cbfn;
3872 	sfp->cbarg = cbarg;
3873 	sfp->sfpmem = sfpmem;
3874 
3875 	bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
3876 	return BFA_STATUS_OK;
3877 }
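
/*
 * Usage sketch: bfa_sfp_show() is asynchronous; sfpmem is valid only
 * after the completion callback runs ("my_" names are hypothetical):
 *
 *	struct sfp_mem_s my_sfpmem;
 *
 *	if (bfa_sfp_show(sfp, &my_sfpmem, my_show_cb, my_arg) ==
 *	    BFA_STATUS_OK)
 *		... wait for my_show_cb before reading my_sfpmem ...
 */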
3878 
3879 /*
3880  * Return SFP Media type
3881  *
3882  * @param[in] sfp   - bfa sfp module
3883  *
3884  * @param[out] media - SFP media type
3885  *
3886  */
3887 bfa_status_t
3888 bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
3889 		bfa_cb_sfp_t cbfn, void *cbarg)
3890 {
3891 	if (!bfa_ioc_is_operational(sfp->ioc)) {
3892 		bfa_trc(sfp, 0);
3893 		return BFA_STATUS_IOC_NON_OP;
3894 	}
3895 
3896 	sfp->media = media;
3897 	if (sfp->state == BFA_SFP_STATE_INIT) {
3898 		if (sfp->state_query_lock) {
3899 			bfa_trc(sfp, 0);
3900 			return BFA_STATUS_DEVBUSY;
3901 		} else {
3902 			sfp->state_query_cbfn = cbfn;
3903 			sfp->state_query_cbarg = cbarg;
3904 			bfa_sfp_state_query(sfp);
3905 			return BFA_STATUS_SFP_NOT_READY;
3906 		}
3907 	}
3908 
3909 	bfa_sfp_media_get(sfp);
3910 	return BFA_STATUS_OK;
3911 }
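
/*
 * Usage sketch: when the SFP state is still being established, the
 * call returns BFA_STATUS_SFP_NOT_READY and *media is delivered later
 * via the callback; otherwise it is filled in synchronously:
 *
 *	enum bfa_defs_sfp_media_e my_media;
 *	bfa_status_t st = bfa_sfp_media(sfp, &my_media, my_cb, my_arg);
 *
 *	if (st == BFA_STATUS_OK)
 *		... my_media is valid now ...
 *	else if (st == BFA_STATUS_SFP_NOT_READY)
 *		... my_cb fires once the state query completes ...
 */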
3912 
3913 /*
3914  * Check if user set port speed is allowed by the SFP
3915  *
3916  * @param[in] sfp   - bfa sfp module
3917  * @param[in] portspeed - port speed from user
3918  *
3919  */
3920 bfa_status_t
3921 bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3922 		bfa_cb_sfp_t cbfn, void *cbarg)
3923 {
3924 	WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
3925 
3926 	if (!bfa_ioc_is_operational(sfp->ioc))
3927 		return BFA_STATUS_IOC_NON_OP;
3928 
3929 	/* For Mezz cards, all speeds are allowed */
3930 	if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
3931 		return BFA_STATUS_OK;
3932 
3933 	/* Check SFP state */
3934 	sfp->portspeed = portspeed;
3935 	if (sfp->state == BFA_SFP_STATE_INIT) {
3936 		if (sfp->state_query_lock) {
3937 			bfa_trc(sfp, 0);
3938 			return BFA_STATUS_DEVBUSY;
3939 		} else {
3940 			sfp->state_query_cbfn = cbfn;
3941 			sfp->state_query_cbarg = cbarg;
3942 			bfa_sfp_state_query(sfp);
3943 			return BFA_STATUS_SFP_NOT_READY;
3944 		}
3945 	}
3946 
3947 	if (sfp->state == BFA_SFP_STATE_REMOVED ||
3948 	    sfp->state == BFA_SFP_STATE_FAILED) {
3949 		bfa_trc(sfp, sfp->state);
3950 		return BFA_STATUS_NO_SFP_DEV;
3951 	}
3952 
3953 	if (sfp->state == BFA_SFP_STATE_INSERTED) {
3954 		bfa_trc(sfp, sfp->state);
3955 		return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
3956 	}
3957 
3958 	/* For eloopback, all speeds are allowed */
3959 	if (sfp->is_elb)
3960 		return BFA_STATUS_OK;
3961 
3962 	return bfa_sfp_speed_valid(sfp, portspeed);
3963 }
3964 
3965 /*
3966  *	Flash module specific
3967  */
3968 
3969 /*
3970  * The FLASH DMA buffer should be big enough to hold both the MFG block
3971  * and the ASIC block (64k) at the same time, and should be 2k aligned
3972  * so that a write segment does not cross a sector boundary.
3973  */
3974 #define BFA_FLASH_SEG_SZ	2048
3975 #define BFA_FLASH_DMA_BUF_SZ	\
3976 	BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
3977 
3978 static void
3979 bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
3980 			int inst, int type)
3981 {
3982 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
3983 	struct bfa_aen_entry_s  *aen_entry;
3984 
3985 	bfad_get_aen_entry(bfad, aen_entry);
3986 	if (!aen_entry)
3987 		return;
3988 
3989 	aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
3990 	aen_entry->aen_data.audit.partition_inst = inst;
3991 	aen_entry->aen_data.audit.partition_type = type;
3992 
3993 	/* Send the AEN notification */
3994 	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
3995 				  BFA_AEN_CAT_AUDIT, event);
3996 }
3997 
3998 static void
3999 bfa_flash_cb(struct bfa_flash_s *flash)
4000 {
4001 	flash->op_busy = 0;
4002 	if (flash->cbfn)
4003 		flash->cbfn(flash->cbarg, flash->status);
4004 }
4005 
4006 static void
4007 bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
4008 {
4009 	struct bfa_flash_s	*flash = cbarg;
4010 
4011 	bfa_trc(flash, event);
4012 	switch (event) {
4013 	case BFA_IOC_E_DISABLED:
4014 	case BFA_IOC_E_FAILED:
4015 		if (flash->op_busy) {
4016 			flash->status = BFA_STATUS_IOC_FAILURE;
4017 			flash->cbfn(flash->cbarg, flash->status);
4018 			flash->op_busy = 0;
4019 		}
4020 		break;
4021 
4022 	default:
4023 		break;
4024 	}
4025 }
4026 
4027 /*
4028  * Send flash attribute query request.
4029  *
4030  * @param[in] cbarg - callback argument
4031  */
4032 static void
4033 bfa_flash_query_send(void *cbarg)
4034 {
4035 	struct bfa_flash_s *flash = cbarg;
4036 	struct bfi_flash_query_req_s *msg =
4037 			(struct bfi_flash_query_req_s *) flash->mb.msg;
4038 
4039 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4040 		bfa_ioc_portid(flash->ioc));
4041 	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4042 		flash->dbuf_pa);
4043 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4044 }
4045 
4046 /*
4047  * Send flash write request.
4048  *
4049  * @param[in] cbarg - callback argument
4050  */
4051 static void
4052 bfa_flash_write_send(struct bfa_flash_s *flash)
4053 {
4054 	struct bfi_flash_write_req_s *msg =
4055 			(struct bfi_flash_write_req_s *) flash->mb.msg;
4056 	u32	len;
4057 
4058 	msg->type = be32_to_cpu(flash->type);
4059 	msg->instance = flash->instance;
4060 	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4061 	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4062 		flash->residue : BFA_FLASH_DMA_BUF_SZ;
4063 	msg->length = be32_to_cpu(len);
4064 
4065 	/* indicate if it's the last msg of the whole write operation */
4066 	msg->last = (len == flash->residue) ? 1 : 0;
4067 
4068 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4069 			bfa_ioc_portid(flash->ioc));
4070 	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4071 	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4072 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4073 
4074 	flash->residue -= len;
4075 	flash->offset += len;
4076 }
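
/*
 * Worked example of the chunking above: with the roughly 64k DMA
 * buffer defined by BFA_FLASH_DMA_BUF_SZ, a 200k partition write goes
 * out as four requests; each write response re-enters
 * bfa_flash_write_send() until residue reaches zero, and only the
 * final request carries last == 1.
 */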
4077 
4078 /*
4079  * Send flash read request.
4080  *
4081  * @param[in] cbarg - callback argument
4082  */
4083 static void
4084 bfa_flash_read_send(void *cbarg)
4085 {
4086 	struct bfa_flash_s *flash = cbarg;
4087 	struct bfi_flash_read_req_s *msg =
4088 			(struct bfi_flash_read_req_s *) flash->mb.msg;
4089 	u32	len;
4090 
4091 	msg->type = be32_to_cpu(flash->type);
4092 	msg->instance = flash->instance;
4093 	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4094 	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4095 			flash->residue : BFA_FLASH_DMA_BUF_SZ;
4096 	msg->length = be32_to_cpu(len);
4097 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4098 		bfa_ioc_portid(flash->ioc));
4099 	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4100 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4101 }
4102 
4103 /*
4104  * Send flash erase request.
4105  *
4106  * @param[in] cbarg - callback argument
4107  */
4108 static void
4109 bfa_flash_erase_send(void *cbarg)
4110 {
4111 	struct bfa_flash_s *flash = cbarg;
4112 	struct bfi_flash_erase_req_s *msg =
4113 			(struct bfi_flash_erase_req_s *) flash->mb.msg;
4114 
4115 	msg->type = be32_to_cpu(flash->type);
4116 	msg->instance = flash->instance;
4117 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4118 			bfa_ioc_portid(flash->ioc));
4119 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4120 }
4121 
4122 /*
4123  * Process flash response messages upon receiving interrupts.
4124  *
4125  * @param[in] flasharg - flash structure
4126  * @param[in] msg - message structure
4127  */
4128 static void
4129 bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4130 {
4131 	struct bfa_flash_s *flash = flasharg;
4132 	u32	status;
4133 
4134 	union {
4135 		struct bfi_flash_query_rsp_s *query;
4136 		struct bfi_flash_erase_rsp_s *erase;
4137 		struct bfi_flash_write_rsp_s *write;
4138 		struct bfi_flash_read_rsp_s *read;
4139 		struct bfi_flash_event_s *event;
4140 		struct bfi_mbmsg_s   *msg;
4141 	} m;
4142 
4143 	m.msg = msg;
4144 	bfa_trc(flash, msg->mh.msg_id);
4145 
4146 	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4147 		/* receiving response after ioc failure */
4148 		bfa_trc(flash, 0x9999);
4149 		return;
4150 	}
4151 
4152 	switch (msg->mh.msg_id) {
4153 	case BFI_FLASH_I2H_QUERY_RSP:
4154 		status = be32_to_cpu(m.query->status);
4155 		bfa_trc(flash, status);
4156 		if (status == BFA_STATUS_OK) {
4157 			u32	i;
4158 			struct bfa_flash_attr_s *attr, *f;
4159 
4160 			attr = (struct bfa_flash_attr_s *) flash->ubuf;
4161 			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4162 			attr->status = be32_to_cpu(f->status);
4163 			attr->npart = be32_to_cpu(f->npart);
4164 			bfa_trc(flash, attr->status);
4165 			bfa_trc(flash, attr->npart);
4166 			for (i = 0; i < attr->npart; i++) {
4167 				attr->part[i].part_type =
4168 					be32_to_cpu(f->part[i].part_type);
4169 				attr->part[i].part_instance =
4170 					be32_to_cpu(f->part[i].part_instance);
4171 				attr->part[i].part_off =
4172 					be32_to_cpu(f->part[i].part_off);
4173 				attr->part[i].part_size =
4174 					be32_to_cpu(f->part[i].part_size);
4175 				attr->part[i].part_len =
4176 					be32_to_cpu(f->part[i].part_len);
4177 				attr->part[i].part_status =
4178 					be32_to_cpu(f->part[i].part_status);
4179 			}
4180 		}
4181 		flash->status = status;
4182 		bfa_flash_cb(flash);
4183 		break;
4184 	case BFI_FLASH_I2H_ERASE_RSP:
4185 		status = be32_to_cpu(m.erase->status);
4186 		bfa_trc(flash, status);
4187 		flash->status = status;
4188 		bfa_flash_cb(flash);
4189 		break;
4190 	case BFI_FLASH_I2H_WRITE_RSP:
4191 		status = be32_to_cpu(m.write->status);
4192 		bfa_trc(flash, status);
4193 		if (status != BFA_STATUS_OK || flash->residue == 0) {
4194 			flash->status = status;
4195 			bfa_flash_cb(flash);
4196 		} else {
4197 			bfa_trc(flash, flash->offset);
4198 			bfa_flash_write_send(flash);
4199 		}
4200 		break;
4201 	case BFI_FLASH_I2H_READ_RSP:
4202 		status = be32_to_cpu(m.read->status);
4203 		bfa_trc(flash, status);
4204 		if (status != BFA_STATUS_OK) {
4205 			flash->status = status;
4206 			bfa_flash_cb(flash);
4207 		} else {
4208 			u32 len = be32_to_cpu(m.read->length);
4209 			bfa_trc(flash, flash->offset);
4210 			bfa_trc(flash, len);
4211 			memcpy(flash->ubuf + flash->offset,
4212 				flash->dbuf_kva, len);
4213 			flash->residue -= len;
4214 			flash->offset += len;
4215 			if (flash->residue == 0) {
4216 				flash->status = status;
4217 				bfa_flash_cb(flash);
4218 			} else
4219 				bfa_flash_read_send(flash);
4220 		}
4221 		break;
4222 	case BFI_FLASH_I2H_BOOT_VER_RSP:
4223 		break;
4224 	case BFI_FLASH_I2H_EVENT:
4225 		status = be32_to_cpu(m.event->status);
4226 		bfa_trc(flash, status);
4227 		if (status == BFA_STATUS_BAD_FWCFG)
4228 			bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4229 		else if (status == BFA_STATUS_INVALID_VENDOR) {
4230 			u32 param;
4231 			param = be32_to_cpu(m.event->param);
4232 			bfa_trc(flash, param);
4233 			bfa_ioc_aen_post(flash->ioc,
4234 				BFA_IOC_AEN_INVALID_VENDOR);
4235 		}
4236 		break;
4237 
4238 	default:
4239 		WARN_ON(1);
4240 	}
4241 }
4242 
4243 /*
4244  * Flash memory info API.
4245  *
4246  * @param[in] mincfg - minimal cfg variable
4247  */
4248 u32
4249 bfa_flash_meminfo(bfa_boolean_t mincfg)
4250 {
4251 	/* min driver doesn't need flash */
4252 	if (mincfg)
4253 		return 0;
4254 	return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4255 }
4256 
4257 /*
4258  * Flash attach API.
4259  *
4260  * @param[in] flash - flash structure
4261  * @param[in] ioc  - ioc structure
4262  * @param[in] dev  - device structure
4263  * @param[in] trcmod - trace module
4264  * @param[in] mincfg - minimal cfg variable
4265  */
4266 void
4267 bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4268 		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4269 {
4270 	flash->ioc = ioc;
4271 	flash->trcmod = trcmod;
4272 	flash->cbfn = NULL;
4273 	flash->cbarg = NULL;
4274 	flash->op_busy = 0;
4275 
4276 	bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4277 	bfa_q_qe_init(&flash->ioc_notify);
4278 	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4279 	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4280 
4281 	/* min driver doesn't need flash */
4282 	if (mincfg) {
4283 		flash->dbuf_kva = NULL;
4284 		flash->dbuf_pa = 0;
4285 	}
4286 }
4287 
4288 /*
4289  * Claim memory for flash
4290  *
4291  * @param[in] flash - flash structure
4292  * @param[in] dm_kva - pointer to virtual memory address
4293  * @param[in] dm_pa - physical memory address
4294  * @param[in] mincfg - minimal cfg variable
4295  */
4296 void
4297 bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4298 		bfa_boolean_t mincfg)
4299 {
4300 	if (mincfg)
4301 		return;
4302 
4303 	flash->dbuf_kva = dm_kva;
4304 	flash->dbuf_pa = dm_pa;
4305 	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
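	/*
	 * Advance past the claimed buffer. Note: dm_kva/dm_pa are passed
	 * by value, so these adjustments are not visible to the caller.
	 */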
4306 	dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4307 	dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4308 }
4309 
4310 /*
4311  * Get flash attribute.
4312  *
4313  * @param[in] flash - flash structure
4314  * @param[in] attr - flash attribute structure
4315  * @param[in] cbfn - callback function
4316  * @param[in] cbarg - callback argument
4317  *
4318  * Return status.
4319  */
4320 bfa_status_t
4321 bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4322 		bfa_cb_flash_t cbfn, void *cbarg)
4323 {
4324 	bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4325 
4326 	if (!bfa_ioc_is_operational(flash->ioc))
4327 		return BFA_STATUS_IOC_NON_OP;
4328 
4329 	if (flash->op_busy) {
4330 		bfa_trc(flash, flash->op_busy);
4331 		return BFA_STATUS_DEVBUSY;
4332 	}
4333 
4334 	flash->op_busy = 1;
4335 	flash->cbfn = cbfn;
4336 	flash->cbarg = cbarg;
4337 	flash->ubuf = (u8 *) attr;
4338 	bfa_flash_query_send(flash);
4339 
4340 	return BFA_STATUS_OK;
4341 }
4342 
4343 /*
4344  * Erase flash partition.
4345  *
4346  * @param[in] flash - flash structure
4347  * @param[in] type - flash partition type
4348  * @param[in] instance - flash partition instance
4349  * @param[in] cbfn - callback function
4350  * @param[in] cbarg - callback argument
4351  *
4352  * Return status.
4353  */
4354 bfa_status_t
4355 bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4356 		u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4357 {
4358 	bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4359 	bfa_trc(flash, type);
4360 	bfa_trc(flash, instance);
4361 
4362 	if (!bfa_ioc_is_operational(flash->ioc))
4363 		return BFA_STATUS_IOC_NON_OP;
4364 
4365 	if (flash->op_busy) {
4366 		bfa_trc(flash, flash->op_busy);
4367 		return BFA_STATUS_DEVBUSY;
4368 	}
4369 
4370 	flash->op_busy = 1;
4371 	flash->cbfn = cbfn;
4372 	flash->cbarg = cbarg;
4373 	flash->type = type;
4374 	flash->instance = instance;
4375 
4376 	bfa_flash_erase_send(flash);
4377 	bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4378 				instance, type);
4379 	return BFA_STATUS_OK;
4380 }
4381 
4382 /*
4383  * Update flash partition.
4384  *
4385  * @param[in] flash - flash structure
4386  * @param[in] type - flash partition type
4387  * @param[in] instance - flash partition instance
4388  * @param[in] buf - update data buffer
4389  * @param[in] len - data buffer length
4390  * @param[in] offset - offset relative to the partition starting address
4391  * @param[in] cbfn - callback function
4392  * @param[in] cbarg - callback argument
4393  *
4394  * Return status.
4395  */
4396 bfa_status_t
4397 bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4398 		u8 instance, void *buf, u32 len, u32 offset,
4399 		bfa_cb_flash_t cbfn, void *cbarg)
4400 {
4401 	bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4402 	bfa_trc(flash, type);
4403 	bfa_trc(flash, instance);
4404 	bfa_trc(flash, len);
4405 	bfa_trc(flash, offset);
4406 
4407 	if (!bfa_ioc_is_operational(flash->ioc))
4408 		return BFA_STATUS_IOC_NON_OP;
4409 
4410 	/*
4411 	 * 'len' must be on a word (4-byte) boundary
4412 	 * 'offset' must be on a sector (16KB) boundary
4413 	 */
4414 	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4415 		return BFA_STATUS_FLASH_BAD_LEN;
4416 
4417 	if (type == BFA_FLASH_PART_MFG)
4418 		return BFA_STATUS_EINVAL;
4419 
4420 	if (flash->op_busy) {
4421 		bfa_trc(flash, flash->op_busy);
4422 		return BFA_STATUS_DEVBUSY;
4423 	}
4424 
4425 	flash->op_busy = 1;
4426 	flash->cbfn = cbfn;
4427 	flash->cbarg = cbarg;
4428 	flash->type = type;
4429 	flash->instance = instance;
4430 	flash->residue = len;
4431 	flash->offset = 0;
4432 	flash->addr_off = offset;
4433 	flash->ubuf = buf;
4434 
4435 	bfa_flash_write_send(flash);
4436 	return BFA_STATUS_OK;
4437 }
4438 
4439 /*
4440  * Read flash partition.
4441  *
4442  * @param[in] flash - flash structure
4443  * @param[in] type - flash partition type
4444  * @param[in] instance - flash partition instance
4445  * @param[in] buf - read data buffer
4446  * @param[in] len - data buffer length
4447  * @param[in] offset - offset relative to the partition starting address
4448  * @param[in] cbfn - callback function
4449  * @param[in] cbarg - callback argument
4450  *
4451  * Return status.
4452  */
4453 bfa_status_t
4454 bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4455 		u8 instance, void *buf, u32 len, u32 offset,
4456 		bfa_cb_flash_t cbfn, void *cbarg)
4457 {
4458 	bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4459 	bfa_trc(flash, type);
4460 	bfa_trc(flash, instance);
4461 	bfa_trc(flash, len);
4462 	bfa_trc(flash, offset);
4463 
4464 	if (!bfa_ioc_is_operational(flash->ioc))
4465 		return BFA_STATUS_IOC_NON_OP;
4466 
4467 	/*
4468 	 * 'len' must be on a word (4-byte) boundary
4469 	 * 'offset' must be on a sector (16KB) boundary
4470 	 */
4471 	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4472 		return BFA_STATUS_FLASH_BAD_LEN;
4473 
4474 	if (flash->op_busy) {
4475 		bfa_trc(flash, flash->op_busy);
4476 		return BFA_STATUS_DEVBUSY;
4477 	}
4478 
4479 	flash->op_busy = 1;
4480 	flash->cbfn = cbfn;
4481 	flash->cbarg = cbarg;
4482 	flash->type = type;
4483 	flash->instance = instance;
4484 	flash->residue = len;
4485 	flash->offset = 0;
4486 	flash->addr_off = offset;
4487 	flash->ubuf = buf;
4488 	bfa_flash_read_send(flash);
4489 
4490 	return BFA_STATUS_OK;
4491 }
4492 
4493 /*
4494  *	DIAG module specific
4495  */
4496 
4497 #define BFA_DIAG_MEMTEST_TOV	50000	/* memtest timeout in msec */
4498 #define BFA_DIAG_FWPING_TOV	1000	/* msec */
4499 
4500 /* IOC event handler */
4501 static void
4502 bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4503 {
4504 	struct bfa_diag_s *diag = diag_arg;
4505 
4506 	bfa_trc(diag, event);
4507 	bfa_trc(diag, diag->block);
4508 	bfa_trc(diag, diag->fwping.lock);
4509 	bfa_trc(diag, diag->tsensor.lock);
4510 
4511 	switch (event) {
4512 	case BFA_IOC_E_DISABLED:
4513 	case BFA_IOC_E_FAILED:
4514 		if (diag->fwping.lock) {
4515 			diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4516 			diag->fwping.cbfn(diag->fwping.cbarg,
4517 					diag->fwping.status);
4518 			diag->fwping.lock = 0;
4519 		}
4520 
4521 		if (diag->tsensor.lock) {
4522 			diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4523 			diag->tsensor.cbfn(diag->tsensor.cbarg,
4524 					   diag->tsensor.status);
4525 			diag->tsensor.lock = 0;
4526 		}
4527 
4528 		if (diag->block) {
4529 			if (diag->timer_active) {
4530 				bfa_timer_stop(&diag->timer);
4531 				diag->timer_active = 0;
4532 			}
4533 
4534 			diag->status = BFA_STATUS_IOC_FAILURE;
4535 			diag->cbfn(diag->cbarg, diag->status);
4536 			diag->block = 0;
4537 		}
4538 		break;
4539 
4540 	default:
4541 		break;
4542 	}
4543 }
4544 
4545 static void
4546 bfa_diag_memtest_done(void *cbarg)
4547 {
4548 	struct bfa_diag_s *diag = cbarg;
4549 	struct bfa_ioc_s  *ioc = diag->ioc;
4550 	struct bfa_diag_memtest_result *res = diag->result;
4551 	u32	loff = BFI_BOOT_MEMTEST_RES_ADDR;
4552 	u32	pgnum, pgoff, i;
4553 
4554 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4555 	pgoff = PSS_SMEM_PGOFF(loff);
4556 
4557 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4558 
4559 	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4560 			 sizeof(u32)); i++) {
4561 		/* read test result from smem */
4562 		*((u32 *) res + i) =
4563 			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4564 		loff += sizeof(u32);
4565 	}
4566 
4567 	/* Reset IOC fwstates to BFI_IOC_UNINIT */
4568 	bfa_ioc_reset_fwstate(ioc);
4569 
4570 	res->status = swab32(res->status);
4571 	bfa_trc(diag, res->status);
4572 
4573 	if (res->status == BFI_BOOT_MEMTEST_RES_SIG) {
4574 		diag->status = BFA_STATUS_OK;
4575 	} else {
4576 		diag->status = BFA_STATUS_MEMTEST_FAILED;
4577 		res->addr = swab32(res->addr);
4578 		res->exp = swab32(res->exp);
4579 		res->act = swab32(res->act);
4580 		res->err_status = swab32(res->err_status);
4581 		res->err_status1 = swab32(res->err_status1);
4582 		res->err_addr = swab32(res->err_addr);
4583 		bfa_trc(diag, res->addr);
4584 		bfa_trc(diag, res->exp);
4585 		bfa_trc(diag, res->act);
4586 		bfa_trc(diag, res->err_status);
4587 		bfa_trc(diag, res->err_status1);
4588 		bfa_trc(diag, res->err_addr);
4589 	}
4590 	diag->timer_active = 0;
4591 	diag->cbfn(diag->cbarg, diag->status);
4592 	diag->block = 0;
4593 }
4594 
4595 /*
4596  * Firmware ping
4597  */
4598 
4599 /*
4600  * Perform DMA test directly
4601  */
4602 static void
4603 diag_fwping_send(struct bfa_diag_s *diag)
4604 {
4605 	struct bfi_diag_fwping_req_s *fwping_req;
4606 	u32	i;
4607 
4608 	bfa_trc(diag, diag->fwping.dbuf_pa);
4609 
4610 	/* fill DMA area with pattern */
4611 	for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4612 		*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4613 
4614 	/* Fill mbox msg */
4615 	fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4616 
4617 	/* Setup SG list */
4618 	bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4619 			diag->fwping.dbuf_pa);
4620 	/* Set up dma count */
4621 	fwping_req->count = cpu_to_be32(diag->fwping.count);
4622 	/* Set up data pattern */
4623 	fwping_req->data = diag->fwping.data;
4624 
4625 	/* build host command */
4626 	bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4627 		bfa_ioc_portid(diag->ioc));
4628 
4629 	/* send mbox cmd */
4630 	bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4631 }
4632 
4633 static void
4634 diag_fwping_comp(struct bfa_diag_s *diag,
4635 		 struct bfi_diag_fwping_rsp_s *diag_rsp)
4636 {
4637 	u32	rsp_data = diag_rsp->data;
4638 	u8	rsp_dma_status = diag_rsp->dma_status;
4639 
4640 	bfa_trc(diag, rsp_data);
4641 	bfa_trc(diag, rsp_dma_status);
4642 
4643 	if (rsp_dma_status == BFA_STATUS_OK) {
4644 		u32	i, pat;
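		/*
		 * The firmware inverts the pattern on each pass, so the
		 * expected buffer contents depend on the count's parity.
		 */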
4645 		pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4646 			diag->fwping.data;
4647 		/* Check mbox data */
4648 		if (diag->fwping.data != rsp_data) {
4649 			bfa_trc(diag, rsp_data);
4650 			diag->fwping.result->dmastatus =
4651 					BFA_STATUS_DATACORRUPTED;
4652 			diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4653 			diag->fwping.cbfn(diag->fwping.cbarg,
4654 					diag->fwping.status);
4655 			diag->fwping.lock = 0;
4656 			return;
4657 		}
4658 		/* Check dma pattern */
4659 		for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4660 			if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4661 				bfa_trc(diag, i);
4662 				bfa_trc(diag, pat);
4663 				bfa_trc(diag,
4664 					*((u32 *)diag->fwping.dbuf_kva + i));
4665 				diag->fwping.result->dmastatus =
4666 						BFA_STATUS_DATACORRUPTED;
4667 				diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4668 				diag->fwping.cbfn(diag->fwping.cbarg,
4669 						diag->fwping.status);
4670 				diag->fwping.lock = 0;
4671 				return;
4672 			}
4673 		}
4674 		diag->fwping.result->dmastatus = BFA_STATUS_OK;
4675 		diag->fwping.status = BFA_STATUS_OK;
4676 		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4677 		diag->fwping.lock = 0;
4678 	} else {
4679 		diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4680 		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4681 		diag->fwping.lock = 0;
4682 	}
4683 }
4684 
4685 /*
4686  * Temperature Sensor
4687  */
4688 
4689 static void
4690 diag_tempsensor_send(struct bfa_diag_s *diag)
4691 {
4692 	struct bfi_diag_ts_req_s *msg;
4693 
4694 	msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4695 	bfa_trc(diag, msg->temp);
4696 	/* build host command */
4697 	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4698 		bfa_ioc_portid(diag->ioc));
4699 	/* send mbox cmd */
4700 	bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4701 }
4702 
4703 static void
4704 diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4705 {
4706 	if (!diag->tsensor.lock) {
4707 		/* receiving response after ioc failure */
4708 		bfa_trc(diag, diag->tsensor.lock);
4709 		return;
4710 	}
4711 
4712 	/*
4713 	 * The ASIC junction tempsensor is a register read operation;
4714 	 * it will always return OK.
4715 	 */
4716 	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4717 	diag->tsensor.temp->ts_junc = rsp->ts_junc;
4718 	diag->tsensor.temp->ts_brd = rsp->ts_brd;
4719 	diag->tsensor.temp->status = BFA_STATUS_OK;
4720 
4721 	if (rsp->ts_brd) {
4722 		if (rsp->status == BFA_STATUS_OK) {
4723 			diag->tsensor.temp->brd_temp =
4724 				be16_to_cpu(rsp->brd_temp);
4725 		} else {
4726 			bfa_trc(diag, rsp->status);
4727 			diag->tsensor.temp->brd_temp = 0;
4728 			diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
4729 		}
4730 	}
4731 	bfa_trc(diag, rsp->ts_junc);
4732 	bfa_trc(diag, rsp->temp);
4733 	bfa_trc(diag, rsp->ts_brd);
4734 	bfa_trc(diag, rsp->brd_temp);
4735 	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4736 	diag->tsensor.lock = 0;
4737 }
4738 
4739 /*
4740  *	LED Test command
4741  */
4742 static void
4743 diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4744 {
4745 	struct bfi_diag_ledtest_req_s  *msg;
4746 
4747 	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4748 	/* build host command */
4749 	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4750 			bfa_ioc_portid(diag->ioc));
4751 
4752 	/*
4753 	 * convert the freq from N blinks per 10 sec to the
4754 	 * crossbow on-time value. We do it here because division is needed.
4755 	 */
4756 	if (ledtest->freq)
4757 		ledtest->freq = 500 / ledtest->freq;
4758 
4759 	if (ledtest->freq == 0)
4760 		ledtest->freq = 1;
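	/* e.g. 5 blinks per 10 sec maps to 500/5 = 100; clamp to at least 1 */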
4761 
4762 	bfa_trc(diag, ledtest->freq);
4764 	msg->cmd = (u8) ledtest->cmd;
4765 	msg->color = (u8) ledtest->color;
4766 	msg->portid = bfa_ioc_portid(diag->ioc);
4767 	msg->led = ledtest->led;
4768 	msg->freq = cpu_to_be16(ledtest->freq);
4769 
4770 	/* send mbox cmd */
4771 	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4772 }
4773 
4774 static void
4775 diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4776 {
4777 	bfa_trc(diag, diag->ledtest.lock);
4778 	diag->ledtest.lock = BFA_FALSE;
4779 	/* no bfa_cb_queue is needed because driver is not waiting */
4780 }
4781 
4782 /*
4783  * Port beaconing
4784  */
4785 static void
4786 diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4787 {
4788 	struct bfi_diag_portbeacon_req_s *msg;
4789 
4790 	msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
4791 	/* build host command */
4792 	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
4793 		bfa_ioc_portid(diag->ioc));
4794 	msg->beacon = beacon;
4795 	msg->period = cpu_to_be32(sec);
4796 	/* send mbox cmd */
4797 	bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
4798 }
4799 
4800 static void
4801 diag_portbeacon_comp(struct bfa_diag_s *diag)
4802 {
4803 	bfa_trc(diag, diag->beacon.state);
4804 	diag->beacon.state = BFA_FALSE;
4805 	if (diag->cbfn_beacon)
4806 		diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
4807 }
4808 
4809 /*
4810  *	Diag mbox message handler
4811  */
4812 void
4813 bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
4814 {
4815 	struct bfa_diag_s *diag = diagarg;
4816 
4817 	switch (msg->mh.msg_id) {
4818 	case BFI_DIAG_I2H_PORTBEACON:
4819 		diag_portbeacon_comp(diag);
4820 		break;
4821 	case BFI_DIAG_I2H_FWPING:
4822 		diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
4823 		break;
4824 	case BFI_DIAG_I2H_TEMPSENSOR:
4825 		diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
4826 		break;
4827 	case BFI_DIAG_I2H_LEDTEST:
4828 		diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
4829 		break;
4830 	default:
4831 		bfa_trc(diag, msg->mh.msg_id);
4832 		WARN_ON(1);
4833 	}
4834 }
4835 
4836 /*
4837  * Gen RAM Test
4838  *
4839  *   @param[in] *diag           - diag data struct
4840  *   @param[in] *memtest        - mem test params input from upper layer
4841  *   @param[in] pattern         - mem test pattern
4842  *   @param[in] *result         - mem test result
4843  *   @param[in] cbfn            - mem test callback function
4844  *   @param[in] cbarg           - callback function arg
4845  *
4846  *   @param[out]
4847  */
4848 bfa_status_t
4849 bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
4850 		u32 pattern, struct bfa_diag_memtest_result *result,
4851 		bfa_cb_diag_t cbfn, void *cbarg)
4852 {
4853 	bfa_trc(diag, pattern);
4854 
4855 	if (!bfa_ioc_adapter_is_disabled(diag->ioc))
4856 		return BFA_STATUS_ADAPTER_ENABLED;
4857 
4858 	/* check to see if there is another destructive diag cmd running */
4859 	if (diag->block) {
4860 		bfa_trc(diag, diag->block);
4861 		return BFA_STATUS_DEVBUSY;
4862 	}
4863 	diag->block = 1;
4864 
4865 	diag->result = result;
4866 	diag->cbfn = cbfn;
4867 	diag->cbarg = cbarg;
4868 
4869 	/* download memtest code and take LPU0 out of reset */
4870 	bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
4871 
4872 	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
4873 			bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
4874 	diag->timer_active = 1;
4875 	return BFA_STATUS_OK;
4876 }
4877 
4878 /*
4879  * DIAG firmware ping command
4880  *
4881  *   @param[in] *diag           - diag data struct
4882  *   @param[in] cnt             - dma loop count for testing PCIE
4883  *   @param[in] data            - data pattern to pass in fw
4884  *   @param[in] *result         - pt to bfa_diag_fwping_result_t data struct
4885  *   @param[in] cbfn            - callback function
4886  *   @param[in] *cbarg          - callback function arg
4887  *
4888  *   @param[out]
4889  */
4890 bfa_status_t
4891 bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
4892 		struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
4893 		void *cbarg)
4894 {
4895 	bfa_trc(diag, cnt);
4896 	bfa_trc(diag, data);
4897 
4898 	if (!bfa_ioc_is_operational(diag->ioc))
4899 		return BFA_STATUS_IOC_NON_OP;
4900 
4901 	if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
4902 	    ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
4903 		return BFA_STATUS_CMD_NOTSUPP;
4904 
4905 	/* check to see if there is another destructive diag cmd running */
4906 	if (diag->block || diag->fwping.lock) {
4907 		bfa_trc(diag, diag->block);
4908 		bfa_trc(diag, diag->fwping.lock);
4909 		return BFA_STATUS_DEVBUSY;
4910 	}
4911 
4912 	/* Initialization */
4913 	diag->fwping.lock = 1;
4914 	diag->fwping.cbfn = cbfn;
4915 	diag->fwping.cbarg = cbarg;
4916 	diag->fwping.result = result;
4917 	diag->fwping.data = data;
4918 	diag->fwping.count = cnt;
4919 
4920 	/* Init test results */
4921 	diag->fwping.result->data = 0;
4922 	diag->fwping.result->status = BFA_STATUS_OK;
4923 
4924 	/* kick off the first ping */
4925 	diag_fwping_send(diag);
4926 	return BFA_STATUS_OK;
4927 }
4928 
4929 /*
4930  * Read Temperature Sensor
4931  *
4932  *   @param[in] *diag           - diag data struct
4933  *   @param[in] *result         - pt to bfa_diag_temp_t data struct
4934  *   @param[in] cbfn            - callback function
4935  *   @param[in] *cbarg          - callback function arg
4936  *
4937  *   @param[out]
4938  */
4939 bfa_status_t
4940 bfa_diag_tsensor_query(struct bfa_diag_s *diag,
4941 		struct bfa_diag_results_tempsensor_s *result,
4942 		bfa_cb_diag_t cbfn, void *cbarg)
4943 {
4944 	/* check to see if there is a destructive diag cmd running */
4945 	if (diag->block || diag->tsensor.lock) {
4946 		bfa_trc(diag, diag->block);
4947 		bfa_trc(diag, diag->tsensor.lock);
4948 		return BFA_STATUS_DEVBUSY;
4949 	}
4950 
4951 	if (!bfa_ioc_is_operational(diag->ioc))
4952 		return BFA_STATUS_IOC_NON_OP;
4953 
4954 	/* Init diag mod params */
4955 	diag->tsensor.lock = 1;
4956 	diag->tsensor.temp = result;
4957 	diag->tsensor.cbfn = cbfn;
4958 	diag->tsensor.cbarg = cbarg;
4959 
4960 	/* Send msg to fw */
4961 	diag_tempsensor_send(diag);
4962 
4963 	return BFA_STATUS_OK;
4964 }
4965 
4966 /*
4967  * LED Test command
4968  *
4969  *   @param[in] *diag           - diag data struct
4970  *   @param[in] *ledtest        - pt to ledtest data structure
4971  *
4972  *   @param[out]
4973  */
4974 bfa_status_t
4975 bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4976 {
4977 	bfa_trc(diag, ledtest->cmd);
4978 
4979 	if (!bfa_ioc_is_operational(diag->ioc))
4980 		return BFA_STATUS_IOC_NON_OP;
4981 
4982 	if (diag->beacon.state)
4983 		return BFA_STATUS_BEACON_ON;
4984 
4985 	if (diag->ledtest.lock)
4986 		return BFA_STATUS_LEDTEST_OP;
4987 
4988 	/* Send msg to fw */
4989 	diag->ledtest.lock = BFA_TRUE;
4990 	diag_ledtest_send(diag, ledtest);
4991 
4992 	return BFA_STATUS_OK;
4993 }
4994 
4995 /*
4996  * Port beaconing command
4997  *
4998  *   @param[in] *diag           - diag data struct
4999  *   @param[in] beacon          - port beaconing 1:ON   0:OFF
5000  *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
5001  *   @param[in] sec             - beaconing duration in seconds
5002  *
5003  *   @param[out]
5004  */
5005 bfa_status_t
5006 bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
5007 		bfa_boolean_t link_e2e_beacon, uint32_t sec)
5008 {
5009 	bfa_trc(diag, beacon);
5010 	bfa_trc(diag, link_e2e_beacon);
5011 	bfa_trc(diag, sec);
5012 
5013 	if (!bfa_ioc_is_operational(diag->ioc))
5014 		return BFA_STATUS_IOC_NON_OP;
5015 
5016 	if (diag->ledtest.lock)
5017 		return BFA_STATUS_LEDTEST_OP;
5018 
5019 	if (diag->beacon.state && beacon)       /* beacon already on */
5020 		return BFA_STATUS_BEACON_ON;
5021 
5022 	diag->beacon.state	= beacon;
5023 	diag->beacon.link_e2e	= link_e2e_beacon;
5024 	if (diag->cbfn_beacon)
5025 		diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
5026 
5027 	/* Send msg to fw */
5028 	diag_portbeacon_send(diag, beacon, sec);
5029 
5030 	return BFA_STATUS_OK;
5031 }
5032 
5033 /*
5034  * Return DMA memory needed by diag module.
5035  */
5036 u32
5037 bfa_diag_meminfo(void)
5038 {
5039 	return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5040 }
5041 
5042 /*
5043  *	Attach virtual and physical memory for Diag.
5044  */
5045 void
5046 bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5047 	bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5048 {
5049 	diag->dev = dev;
5050 	diag->ioc = ioc;
5051 	diag->trcmod = trcmod;
5052 
5053 	diag->block = 0;
5054 	diag->cbfn = NULL;
5055 	diag->cbarg = NULL;
5056 	diag->result = NULL;
5057 	diag->cbfn_beacon = cbfn_beacon;
5058 
5059 	bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5060 	bfa_q_qe_init(&diag->ioc_notify);
5061 	bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5062 	list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5063 }
5064 
5065 void
5066 bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5067 {
5068 	diag->fwping.dbuf_kva = dm_kva;
5069 	diag->fwping.dbuf_pa = dm_pa;
5070 	memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5071 }
5072 
5073 /*
5074  *	PHY module specific
5075  */
5076 #define BFA_PHY_DMA_BUF_SZ	0x02000         /* 8k dma buffer */
5077 #define BFA_PHY_LOCK_STATUS	0x018878        /* phy semaphore status reg */
5078 
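/*
 * Convert a buffer of sz bytes of big-endian 32-bit words to host order.
 */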
5079 static void
5080 bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5081 {
5082 	int i, m = sz >> 2;
5083 
5084 	for (i = 0; i < m; i++)
5085 		obuf[i] = be32_to_cpu(ibuf[i]);
5086 }
5087 
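/* The external phy is present only on Lightning-type cards. */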
5088 static bfa_boolean_t
5089 bfa_phy_present(struct bfa_phy_s *phy)
5090 {
5091 	return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5092 }
5093 
5094 static void
5095 bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5096 {
5097 	struct bfa_phy_s *phy = cbarg;
5098 
5099 	bfa_trc(phy, event);
5100 
5101 	switch (event) {
5102 	case BFA_IOC_E_DISABLED:
5103 	case BFA_IOC_E_FAILED:
5104 		if (phy->op_busy) {
5105 			phy->status = BFA_STATUS_IOC_FAILURE;
5106 			phy->cbfn(phy->cbarg, phy->status);
5107 			phy->op_busy = 0;
5108 		}
5109 		break;
5110 
5111 	default:
5112 		break;
5113 	}
5114 }
5115 
5116 /*
5117  * Send phy attribute query request.
5118  *
5119  * @param[in] cbarg - callback argument
5120  */
5121 static void
5122 bfa_phy_query_send(void *cbarg)
5123 {
5124 	struct bfa_phy_s *phy = cbarg;
5125 	struct bfi_phy_query_req_s *msg =
5126 			(struct bfi_phy_query_req_s *) phy->mb.msg;
5127 
5128 	msg->instance = phy->instance;
5129 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5130 		bfa_ioc_portid(phy->ioc));
5131 	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5132 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5133 }
5134 
5135 /*
5136  * Send phy write request.
5137  *
5138  * @param[in] cbarg - callback argument
5139  */
5140 static void
5141 bfa_phy_write_send(void *cbarg)
5142 {
5143 	struct bfa_phy_s *phy = cbarg;
5144 	struct bfi_phy_write_req_s *msg =
5145 			(struct bfi_phy_write_req_s *) phy->mb.msg;
5146 	u32	len;
5147 	u16	*buf, *dbuf;
5148 	int	i, sz;
5149 
5150 	msg->instance = phy->instance;
5151 	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5152 	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5153 			phy->residue : BFA_PHY_DMA_BUF_SZ;
5154 	msg->length = cpu_to_be32(len);
5155 
5156 	/* indicate if it's the last msg of the whole write operation */
5157 	msg->last = (len == phy->residue) ? 1 : 0;
5158 
5159 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5160 		bfa_ioc_portid(phy->ioc));
5161 	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5162 
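	/* byte-swap the host data into the DMA staging buffer */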
5163 	buf = (u16 *) (phy->ubuf + phy->offset);
5164 	dbuf = (u16 *)phy->dbuf_kva;
5165 	sz = len >> 1;
5166 	for (i = 0; i < sz; i++)
5167 		dbuf[i] = cpu_to_be16(buf[i]);
5168 
5169 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5170 
5171 	phy->residue -= len;
5172 	phy->offset += len;
5173 }
5174 
5175 /*
5176  * Send phy read request.
5177  *
5178  * @param[in] cbarg - callback argument
5179  */
5180 static void
5181 bfa_phy_read_send(void *cbarg)
5182 {
5183 	struct bfa_phy_s *phy = cbarg;
5184 	struct bfi_phy_read_req_s *msg =
5185 			(struct bfi_phy_read_req_s *) phy->mb.msg;
5186 	u32	len;
5187 
5188 	msg->instance = phy->instance;
5189 	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5190 	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5191 			phy->residue : BFA_PHY_DMA_BUF_SZ;
5192 	msg->length = cpu_to_be32(len);
5193 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5194 		bfa_ioc_portid(phy->ioc));
5195 	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5196 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5197 }
5198 
5199 /*
5200  * Send phy stats request.
5201  *
5202  * @param[in] cbarg - callback argument
5203  */
5204 static void
5205 bfa_phy_stats_send(void *cbarg)
5206 {
5207 	struct bfa_phy_s *phy = cbarg;
5208 	struct bfi_phy_stats_req_s *msg =
5209 			(struct bfi_phy_stats_req_s *) phy->mb.msg;
5210 
5211 	msg->instance = phy->instance;
5212 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5213 		bfa_ioc_portid(phy->ioc));
5214 	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5215 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5216 }
5217 
5218 /*
5219  * Phy memory info API.
5220  *
5221  * @param[in] mincfg - minimal cfg variable
5222  */
5223 u32
5224 bfa_phy_meminfo(bfa_boolean_t mincfg)
5225 {
5226 	/* min driver doesn't need phy */
5227 	if (mincfg)
5228 		return 0;
5229 
5230 	return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5231 }
5232 
5233 /*
5234  * Phy attach API.
5235  *
5236  * @param[in] phy - phy structure
5237  * @param[in] ioc  - ioc structure
5238  * @param[in] dev  - device structure
5239  * @param[in] trcmod - trace module
5240  * @param[in] mincfg - minimal cfg variable
5241  */
5242 void
5243 bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5244 		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5245 {
5246 	phy->ioc = ioc;
5247 	phy->trcmod = trcmod;
5248 	phy->cbfn = NULL;
5249 	phy->cbarg = NULL;
5250 	phy->op_busy = 0;
5251 
5252 	bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5253 	bfa_q_qe_init(&phy->ioc_notify);
5254 	bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5255 	list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5256 
5257 	/* min driver doesn't need phy */
5258 	if (mincfg) {
5259 		phy->dbuf_kva = NULL;
5260 		phy->dbuf_pa = 0;
5261 	}
5262 }
5263 
5264 /*
5265  * Claim memory for phy
5266  *
5267  * @param[in] phy - phy structure
5268  * @param[in] dm_kva - pointer to virtual memory address
5269  * @param[in] dm_pa - physical memory address
5270  * @param[in] mincfg - minimal cfg variable
5271  */
5272 void
5273 bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5274 		bfa_boolean_t mincfg)
5275 {
5276 	if (mincfg)
5277 		return;
5278 
5279 	phy->dbuf_kva = dm_kva;
5280 	phy->dbuf_pa = dm_pa;
5281 	memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
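	/*
	 * Advance past the claimed buffer. Note: dm_kva/dm_pa are passed
	 * by value, so these adjustments are not visible to the caller.
	 */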
5282 	dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5283 	dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5284 }
5285 
5286 bfa_boolean_t
5287 bfa_phy_busy(struct bfa_ioc_s *ioc)
5288 {
5289 	void __iomem	*rb;
5290 
5291 	rb = bfa_ioc_bar0(ioc);
5292 	return readl(rb + BFA_PHY_LOCK_STATUS);
5293 }
5294 
5295 /*
5296  * Get phy attribute.
5297  *
5298  * @param[in] phy - phy structure
5299  * @param[in] attr - phy attribute structure
5300  * @param[in] cbfn - callback function
5301  * @param[in] cbarg - callback argument
5302  *
5303  * Return status.
5304  */
5305 bfa_status_t
5306 bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5307 		struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5308 {
5309 	bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5310 	bfa_trc(phy, instance);
5311 
5312 	if (!bfa_phy_present(phy))
5313 		return BFA_STATUS_PHY_NOT_PRESENT;
5314 
5315 	if (!bfa_ioc_is_operational(phy->ioc))
5316 		return BFA_STATUS_IOC_NON_OP;
5317 
5318 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5319 		bfa_trc(phy, phy->op_busy);
5320 		return BFA_STATUS_DEVBUSY;
5321 	}
5322 
5323 	phy->op_busy = 1;
5324 	phy->cbfn = cbfn;
5325 	phy->cbarg = cbarg;
5326 	phy->instance = instance;
5327 	phy->ubuf = (uint8_t *) attr;
5328 	bfa_phy_query_send(phy);
5329 
5330 	return BFA_STATUS_OK;
5331 }
5332 
5333 /*
5334  * Get phy stats.
5335  *
5336  * @param[in] phy - phy structure
5337  * @param[in] instance - phy image instance
5338  * @param[in] stats - pointer to phy stats
5339  * @param[in] cbfn - callback function
5340  * @param[in] cbarg - callback argument
5341  *
5342  * Return status.
5343  */
5344 bfa_status_t
5345 bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5346 		struct bfa_phy_stats_s *stats,
5347 		bfa_cb_phy_t cbfn, void *cbarg)
5348 {
5349 	bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5350 	bfa_trc(phy, instance);
5351 
5352 	if (!bfa_phy_present(phy))
5353 		return BFA_STATUS_PHY_NOT_PRESENT;
5354 
5355 	if (!bfa_ioc_is_operational(phy->ioc))
5356 		return BFA_STATUS_IOC_NON_OP;
5357 
5358 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5359 		bfa_trc(phy, phy->op_busy);
5360 		return BFA_STATUS_DEVBUSY;
5361 	}
5362 
5363 	phy->op_busy = 1;
5364 	phy->cbfn = cbfn;
5365 	phy->cbarg = cbarg;
5366 	phy->instance = instance;
5367 	phy->ubuf = (u8 *) stats;
5368 	bfa_phy_stats_send(phy);
5369 
5370 	return BFA_STATUS_OK;
5371 }
5372 
5373 /*
5374  * Update phy image.
5375  *
5376  * @param[in] phy - phy structure
5377  * @param[in] instance - phy image instance
5378  * @param[in] buf - update data buffer
5379  * @param[in] len - data buffer length
5380  * @param[in] offset - offset relative to starting address
5381  * @param[in] cbfn - callback function
5382  * @param[in] cbarg - callback argument
5383  *
5384  * Return status.
5385  */
5386 bfa_status_t
5387 bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5388 		void *buf, u32 len, u32 offset,
5389 		bfa_cb_phy_t cbfn, void *cbarg)
5390 {
5391 	bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5392 	bfa_trc(phy, instance);
5393 	bfa_trc(phy, len);
5394 	bfa_trc(phy, offset);
5395 
5396 	if (!bfa_phy_present(phy))
5397 		return BFA_STATUS_PHY_NOT_PRESENT;
5398 
5399 	if (!bfa_ioc_is_operational(phy->ioc))
5400 		return BFA_STATUS_IOC_NON_OP;
5401 
5402 	/* 'len' must be on a word (4-byte) boundary */
5403 	if (!len || (len & 0x03))
5404 		return BFA_STATUS_FAILED;
5405 
5406 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5407 		bfa_trc(phy, phy->op_busy);
5408 		return BFA_STATUS_DEVBUSY;
5409 	}
5410 
5411 	phy->op_busy = 1;
5412 	phy->cbfn = cbfn;
5413 	phy->cbarg = cbarg;
5414 	phy->instance = instance;
5415 	phy->residue = len;
5416 	phy->offset = 0;
5417 	phy->addr_off = offset;
5418 	phy->ubuf = buf;
5419 
5420 	bfa_phy_write_send(phy);
5421 	return BFA_STATUS_OK;
5422 }
5423 
5424 /*
5425  * Read phy image.
5426  *
5427  * @param[in] phy - phy structure
5428  * @param[in] instance - phy image instance
5429  * @param[in] buf - read data buffer
5430  * @param[in] len - data buffer length
5431  * @param[in] offset - offset relative to starting address
5432  * @param[in] cbfn - callback function
5433  * @param[in] cbarg - callback argument
5434  *
5435  * Return status.
5436  */
5437 bfa_status_t
5438 bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5439 		void *buf, u32 len, u32 offset,
5440 		bfa_cb_phy_t cbfn, void *cbarg)
5441 {
5442 	bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5443 	bfa_trc(phy, instance);
5444 	bfa_trc(phy, len);
5445 	bfa_trc(phy, offset);
5446 
5447 	if (!bfa_phy_present(phy))
5448 		return BFA_STATUS_PHY_NOT_PRESENT;
5449 
5450 	if (!bfa_ioc_is_operational(phy->ioc))
5451 		return BFA_STATUS_IOC_NON_OP;
5452 
5453 	/* 'len' must be on a word (4-byte) boundary */
5454 	if (!len || (len & 0x03))
5455 		return BFA_STATUS_FAILED;
5456 
5457 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5458 		bfa_trc(phy, phy->op_busy);
5459 		return BFA_STATUS_DEVBUSY;
5460 	}
5461 
5462 	phy->op_busy = 1;
5463 	phy->cbfn = cbfn;
5464 	phy->cbarg = cbarg;
5465 	phy->instance = instance;
5466 	phy->residue = len;
5467 	phy->offset = 0;
5468 	phy->addr_off = offset;
5469 	phy->ubuf = buf;
5470 	bfa_phy_read_send(phy);
5471 
5472 	return BFA_STATUS_OK;
5473 }
5474 
5475 /*
5476  * Process phy response messages upon receiving interrupts.
5477  *
5478  * @param[in] phyarg - phy structure
5479  * @param[in] msg - message structure
5480  */
5481 void
5482 bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5483 {
5484 	struct bfa_phy_s *phy = phyarg;
5485 	u32	status;
5486 
5487 	union {
5488 		struct bfi_phy_query_rsp_s *query;
5489 		struct bfi_phy_stats_rsp_s *stats;
5490 		struct bfi_phy_write_rsp_s *write;
5491 		struct bfi_phy_read_rsp_s *read;
5492 		struct bfi_mbmsg_s   *msg;
5493 	} m;
5494 
5495 	m.msg = msg;
5496 	bfa_trc(phy, msg->mh.msg_id);
5497 
5498 	if (!phy->op_busy) {
5499 		/* receiving response after ioc failure */
5500 		bfa_trc(phy, 0x9999);
5501 		return;
5502 	}
5503 
5504 	switch (msg->mh.msg_id) {
5505 	case BFI_PHY_I2H_QUERY_RSP:
5506 		status = be32_to_cpu(m.query->status);
5507 		bfa_trc(phy, status);
5508 
5509 		if (status == BFA_STATUS_OK) {
5510 			struct bfa_phy_attr_s *attr =
5511 				(struct bfa_phy_attr_s *) phy->ubuf;
5512 			bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5513 					sizeof(struct bfa_phy_attr_s));
5514 			bfa_trc(phy, attr->status);
5515 			bfa_trc(phy, attr->length);
5516 		}
5517 
5518 		phy->status = status;
5519 		phy->op_busy = 0;
5520 		if (phy->cbfn)
5521 			phy->cbfn(phy->cbarg, phy->status);
5522 		break;
5523 	case BFI_PHY_I2H_STATS_RSP:
5524 		status = be32_to_cpu(m.stats->status);
5525 		bfa_trc(phy, status);
5526 
5527 		if (status == BFA_STATUS_OK) {
5528 			struct bfa_phy_stats_s *stats =
5529 				(struct bfa_phy_stats_s *) phy->ubuf;
5530 			bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5531 				sizeof(struct bfa_phy_stats_s));
5532 			bfa_trc(phy, stats->status);
5533 		}
5534 
5535 		phy->status = status;
5536 		phy->op_busy = 0;
5537 		if (phy->cbfn)
5538 			phy->cbfn(phy->cbarg, phy->status);
5539 		break;
5540 	case BFI_PHY_I2H_WRITE_RSP:
5541 		status = be32_to_cpu(m.write->status);
5542 		bfa_trc(phy, status);
5543 
5544 		if (status != BFA_STATUS_OK || phy->residue == 0) {
5545 			phy->status = status;
5546 			phy->op_busy = 0;
5547 			if (phy->cbfn)
5548 				phy->cbfn(phy->cbarg, phy->status);
5549 		} else {
5550 			bfa_trc(phy, phy->offset);
5551 			bfa_phy_write_send(phy);
5552 		}
5553 		break;
5554 	case BFI_PHY_I2H_READ_RSP:
5555 		status = be32_to_cpu(m.read->status);
5556 		bfa_trc(phy, status);
5557 
5558 		if (status != BFA_STATUS_OK) {
5559 			phy->status = status;
5560 			phy->op_busy = 0;
5561 			if (phy->cbfn)
5562 				phy->cbfn(phy->cbarg, phy->status);
5563 		} else {
5564 			u32 len = be32_to_cpu(m.read->length);
5565 			u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5566 			u16 *dbuf = (u16 *)phy->dbuf_kva;
5567 			int i, sz = len >> 1;
5568 
5569 			bfa_trc(phy, phy->offset);
5570 			bfa_trc(phy, len);
5571 
5572 			for (i = 0; i < sz; i++)
5573 				buf[i] = be16_to_cpu(dbuf[i]);
5574 
5575 			phy->residue -= len;
5576 			phy->offset += len;
5577 
5578 			if (phy->residue == 0) {
5579 				phy->status = status;
5580 				phy->op_busy = 0;
5581 				if (phy->cbfn)
5582 					phy->cbfn(phy->cbarg, phy->status);
5583 			} else
5584 				bfa_phy_read_send(phy);
5585 		}
5586 		break;
5587 	default:
5588 		WARN_ON(1);
5589 	}
5590 }
5591 
5592 /*
5593  *	DCONF module specific
5594  */
5595 
5596 BFA_MODULE(dconf);
5597 
5598 /*
5599  * DCONF state machine events
5600  */
5601 enum bfa_dconf_event {
5602 	BFA_DCONF_SM_INIT		= 1,	/* dconf Init */
5603 	BFA_DCONF_SM_FLASH_COMP		= 2,	/* read/write to flash */
5604 	BFA_DCONF_SM_WR			= 3,	/* binding change, map */
5605 	BFA_DCONF_SM_TIMEOUT		= 4,	/* timer expiry */
5606 	BFA_DCONF_SM_EXIT		= 5,	/* exit dconf module */
5607 	BFA_DCONF_SM_IOCDISABLE		= 6,	/* IOC disable event */
5608 };
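
/*
 * State flow: uninit -> flash_read -> ready. A write (WR) marks the
 * config dirty; the update timer flushes dirty -> sync -> ready, and
 * module exit drains through final_sync. An IOC failure while dirty
 * parks the module in iocdown_dirty until the next init.
 */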
5609 
5610 /* forward declaration of DCONF state machine */
5611 static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5612 				enum bfa_dconf_event event);
5613 static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5614 				enum bfa_dconf_event event);
5615 static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5616 				enum bfa_dconf_event event);
5617 static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5618 				enum bfa_dconf_event event);
5619 static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5620 				enum bfa_dconf_event event);
5621 static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5622 				enum bfa_dconf_event event);
5623 static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5624 				enum bfa_dconf_event event);
5625 
5626 static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5627 static void bfa_dconf_timer(void *cbarg);
5628 static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5629 static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5630 
5631 /*
5632  * Beginning state of the dconf module, waiting for an event to start.
5633  */
5634 static void
5635 bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5636 {
5637 	bfa_status_t bfa_status;
5638 	bfa_trc(dconf->bfa, event);
5639 
5640 	switch (event) {
5641 	case BFA_DCONF_SM_INIT:
5642 		if (dconf->min_cfg) {
5643 			bfa_trc(dconf->bfa, dconf->min_cfg);
5644 			return;
5645 		}
5646 		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5647 		dconf->flashdone = BFA_FALSE;
5648 		bfa_trc(dconf->bfa, dconf->flashdone);
5649 		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5650 					BFA_FLASH_PART_DRV, dconf->instance,
5651 					dconf->dconf,
5652 					sizeof(struct bfa_dconf_s), 0,
5653 					bfa_dconf_init_cb, dconf->bfa);
5654 		if (bfa_status != BFA_STATUS_OK) {
5655 			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5656 			bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5657 			return;
5658 		}
5659 		break;
5660 	case BFA_DCONF_SM_EXIT:
5661 		dconf->flashdone = BFA_TRUE;
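		/* fall through */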
5662 	case BFA_DCONF_SM_IOCDISABLE:
5663 	case BFA_DCONF_SM_WR:
5664 	case BFA_DCONF_SM_FLASH_COMP:
5665 		break;
5666 	default:
5667 		bfa_sm_fault(dconf->bfa, event);
5668 	}
5669 }
5670 
5671 /*
5672  * Read flash for dconf entries and make a call back to the driver once done.
5673  */
5674 static void
5675 bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5676 			enum bfa_dconf_event event)
5677 {
5678 	bfa_trc(dconf->bfa, event);
5679 
5680 	switch (event) {
5681 	case BFA_DCONF_SM_FLASH_COMP:
5682 		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5683 		break;
5684 	case BFA_DCONF_SM_TIMEOUT:
5685 		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5686 		break;
5687 	case BFA_DCONF_SM_EXIT:
5688 		dconf->flashdone = BFA_TRUE;
5689 		bfa_trc(dconf->bfa, dconf->flashdone);
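		/* fall through */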
5690 	case BFA_DCONF_SM_IOCDISABLE:
5691 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5692 		break;
5693 	default:
5694 		bfa_sm_fault(dconf->bfa, event);
5695 	}
5696 }
5697 
5698 /*
5699  * The DCONF module is in the ready state; initialization has completed.
5700  */
5701 static void
5702 bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5703 {
5704 	bfa_trc(dconf->bfa, event);
5705 
5706 	switch (event) {
5707 	case BFA_DCONF_SM_WR:
5708 		bfa_timer_start(dconf->bfa, &dconf->timer,
5709 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5710 		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5711 		break;
5712 	case BFA_DCONF_SM_EXIT:
5713 		dconf->flashdone = BFA_TRUE;
5714 		bfa_trc(dconf->bfa, dconf->flashdone);
5715 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5716 		break;
5717 	case BFA_DCONF_SM_INIT:
5718 	case BFA_DCONF_SM_IOCDISABLE:
5719 		break;
5720 	default:
5721 		bfa_sm_fault(dconf->bfa, event);
5722 	}
5723 }
5724 
5725 /*
5726  * Entries are dirty; write them back to the flash.
5727  */
5728 
5729 static void
5730 bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5731 {
5732 	bfa_trc(dconf->bfa, event);
5733 
5734 	switch (event) {
5735 	case BFA_DCONF_SM_TIMEOUT:
5736 		bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5737 		bfa_dconf_flash_write(dconf);
5738 		break;
5739 	case BFA_DCONF_SM_WR:
5740 		bfa_timer_stop(&dconf->timer);
5741 		bfa_timer_start(dconf->bfa, &dconf->timer,
5742 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5743 		break;
5744 	case BFA_DCONF_SM_EXIT:
5745 		bfa_timer_stop(&dconf->timer);
5746 		bfa_timer_start(dconf->bfa, &dconf->timer,
5747 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5748 		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5749 		bfa_dconf_flash_write(dconf);
5750 		break;
5751 	case BFA_DCONF_SM_FLASH_COMP:
5752 		break;
5753 	case BFA_DCONF_SM_IOCDISABLE:
5754 		bfa_timer_stop(&dconf->timer);
5755 		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5756 		break;
5757 	default:
5758 		bfa_sm_fault(dconf->bfa, event);
5759 	}
5760 }
5761 
5762 /*
5763  * Sync the dconf entries to the flash.
5764  */
5765 static void
5766 bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5767 			enum bfa_dconf_event event)
5768 {
5769 	bfa_trc(dconf->bfa, event);
5770 
5771 	switch (event) {
5772 	case BFA_DCONF_SM_IOCDISABLE:
5773 	case BFA_DCONF_SM_FLASH_COMP:
5774 		bfa_timer_stop(&dconf->timer);
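		/* fall through */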
5775 	case BFA_DCONF_SM_TIMEOUT:
5776 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5777 		dconf->flashdone = BFA_TRUE;
5778 		bfa_trc(dconf->bfa, dconf->flashdone);
5779 		bfa_ioc_disable(&dconf->bfa->ioc);
5780 		break;
5781 	default:
5782 		bfa_sm_fault(dconf->bfa, event);
5783 	}
5784 }
5785 
5786 static void
5787 bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5788 {
5789 	bfa_trc(dconf->bfa, event);
5790 
5791 	switch (event) {
5792 	case BFA_DCONF_SM_FLASH_COMP:
5793 		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5794 		break;
5795 	case BFA_DCONF_SM_WR:
5796 		bfa_timer_start(dconf->bfa, &dconf->timer,
5797 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5798 		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5799 		break;
5800 	case BFA_DCONF_SM_EXIT:
5801 		bfa_timer_start(dconf->bfa, &dconf->timer,
5802 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5803 		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5804 		break;
5805 	case BFA_DCONF_SM_IOCDISABLE:
5806 		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5807 		break;
5808 	default:
5809 		bfa_sm_fault(dconf->bfa, event);
5810 	}
5811 }
5812 
5813 static void
5814 bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5815 			enum bfa_dconf_event event)
5816 {
5817 	bfa_trc(dconf->bfa, event);
5818 
5819 	switch (event) {
5820 	case BFA_DCONF_SM_INIT:
5821 		bfa_timer_start(dconf->bfa, &dconf->timer,
5822 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5823 		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5824 		break;
5825 	case BFA_DCONF_SM_EXIT:
5826 		dconf->flashdone = BFA_TRUE;
5827 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5828 		break;
5829 	case BFA_DCONF_SM_IOCDISABLE:
5830 		break;
5831 	default:
5832 		bfa_sm_fault(dconf->bfa, event);
5833 	}
5834 }
5835 
5836 /*
5837  * Compute the memory needed by the DRV_CFG module and set it up in meminfo.
5838  */
5839 static void
5840 bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5841 		  struct bfa_s *bfa)
5842 {
5843 	struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
5844 
5845 	if (cfg->drvcfg.min_cfg)
5846 		bfa_mem_kva_setup(meminfo, dconf_kva,
5847 				sizeof(struct bfa_dconf_hdr_s));
5848 	else
5849 		bfa_mem_kva_setup(meminfo, dconf_kva,
5850 				sizeof(struct bfa_dconf_s));
5851 }
5852 
5853 static void
5854 bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5855 		struct bfa_pcidev_s *pcidev)
5856 {
5857 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5858 
5859 	dconf->bfad = bfad;
5860 	dconf->bfa = bfa;
5861 	dconf->instance = bfa->ioc.port_id;
5862 	bfa_trc(bfa, dconf->instance);
5863 
5864 	dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
5865 	if (cfg->drvcfg.min_cfg) {
5866 		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
5867 		dconf->min_cfg = BFA_TRUE;
5868 		/*
5869 		 * Set the flashdone flag to TRUE explicitly as no flash
5870 		 * write will happen in min_cfg mode.
5871 		 */
5872 		dconf->flashdone = BFA_TRUE;
5873 	} else {
5874 		dconf->min_cfg = BFA_FALSE;
5875 		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
5876 	}
5877 
5878 	bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
5879 	bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5880 }
5881 
5882 static void
5883 bfa_dconf_init_cb(void *arg, bfa_status_t status)
5884 {
5885 	struct bfa_s *bfa = arg;
5886 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5887 
5888 	dconf->flashdone = BFA_TRUE;
5889 	bfa_trc(bfa, dconf->flashdone);
5890 	bfa_iocfc_cb_dconf_modinit(bfa, status);
5891 	if (status == BFA_STATUS_OK) {
5892 		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
5893 		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
5894 			dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
5895 		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
5896 			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
5897 	}
5898 	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5899 }
5900 
5901 void
5902 bfa_dconf_modinit(struct bfa_s *bfa)
5903 {
5904 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5905 	bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
5906 }
5907 static void
5908 bfa_dconf_start(struct bfa_s *bfa)
5909 {
5910 }
5911 
5912 static void
5913 bfa_dconf_stop(struct bfa_s *bfa)
5914 {
5915 }
5916 
5917 static void bfa_dconf_timer(void *cbarg)
5918 {
5919 	struct bfa_dconf_mod_s *dconf = cbarg;
5920 	bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
5921 }
5922 static void
5923 bfa_dconf_iocdisable(struct bfa_s *bfa)
5924 {
5925 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5926 	bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
5927 }
5928 
5929 static void
5930 bfa_dconf_detach(struct bfa_s *bfa)
5931 {
5932 }
5933 
5934 static bfa_status_t
5935 bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
5936 {
5937 	bfa_status_t bfa_status;
5938 	bfa_trc(dconf->bfa, 0);
5939 
5940 	bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
5941 				BFA_FLASH_PART_DRV, dconf->instance,
5942 				dconf->dconf,  sizeof(struct bfa_dconf_s), 0,
5943 				bfa_dconf_cbfn, dconf);
5944 	WARN_ON(bfa_status != BFA_STATUS_OK);
5946 	bfa_trc(dconf->bfa, bfa_status);
5947 
5948 	return bfa_status;
5949 }
5950 
5951 bfa_status_t
5952 bfa_dconf_update(struct bfa_s *bfa)
5953 {
5954 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5955 	bfa_trc(dconf->bfa, 0);
5956 	if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
5957 		return BFA_STATUS_FAILED;
5958 
5959 	if (dconf->min_cfg) {
5960 		bfa_trc(dconf->bfa, dconf->min_cfg);
5961 		return BFA_STATUS_FAILED;
5962 	}
5963 
5964 	bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
5965 	return BFA_STATUS_OK;
5966 }
5967 
5968 static void
5969 bfa_dconf_cbfn(void *arg, bfa_status_t status)
5970 {
5971 	struct bfa_dconf_mod_s *dconf = arg;
5972 	WARN_ON(status);
5973 	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5974 }
5975 
5976 void
5977 bfa_dconf_modexit(struct bfa_s *bfa)
5978 {
5979 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5980 	dconf->flashdone = BFA_FALSE;
5981 	bfa_trc(bfa, dconf->flashdone);
5982 	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
5983 }
5984