xref: /openbmc/linux/drivers/scsi/bfa/bfa_ioc.c (revision 81d67439)
1 /*
2  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3  * All rights reserved
4  * www.brocade.com
5  *
6  * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License (GPL) Version 2 as
10  * published by the Free Software Foundation
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  */
17 
18 #include "bfad_drv.h"
19 #include "bfa_ioc.h"
20 #include "bfi_reg.h"
21 #include "bfa_defs.h"
22 #include "bfa_defs_svc.h"
23 
24 BFA_TRC_FILE(CNA, IOC);
25 
26 /*
27  * IOC local definitions
28  */
29 #define BFA_IOC_TOV		3000	/* msecs */
30 #define BFA_IOC_HWSEM_TOV	500	/* msecs */
31 #define BFA_IOC_HB_TOV		500	/* msecs */
32 #define BFA_IOC_TOV_RECOVER	 BFA_IOC_HB_TOV
33 #define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ
34 
35 #define bfa_ioc_timer_start(__ioc)					\
36 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
37 			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
38 #define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)
39 
40 #define bfa_hb_timer_start(__ioc)					\
41 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
42 			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
43 #define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)
44 
45 #define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
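
/*
 * Expansion example: the saved firmware trace of PCI function 1 lives
 * in SMEM at BFA_DBG_FWTRC_OFF(1), i.e. BFI_IOC_TRC_OFF +
 * BFA_DBG_FWTRC_LEN (constants from the bfi/bfa headers).
 */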
46 
47 /*
48  * ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
49  */
50 
51 #define bfa_ioc_firmware_lock(__ioc)			\
52 			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
53 #define bfa_ioc_firmware_unlock(__ioc)			\
54 			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
55 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
56 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
57 #define bfa_ioc_notify_fail(__ioc)              \
58 			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
59 #define bfa_ioc_sync_start(__ioc)               \
60 			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
61 #define bfa_ioc_sync_join(__ioc)                \
62 			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
63 #define bfa_ioc_sync_leave(__ioc)               \
64 			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
65 #define bfa_ioc_sync_ack(__ioc)                 \
66 			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
67 #define bfa_ioc_sync_complete(__ioc)            \
68 			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
69 
70 #define bfa_ioc_mbox_cmd_pending(__ioc)		\
71 			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
72 			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
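
/*
 * A mailbox command is pending if a request is still queued in s/w
 * (cmd_q) or the previous request in the h/w mailbox command register
 * has not yet been fetched by firmware.
 */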
73 
74 bfa_boolean_t bfa_auto_recover = BFA_TRUE;
75 
76 /*
77  * forward declarations
78  */
79 static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
80 static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
81 static void bfa_ioc_timeout(void *ioc);
82 static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
83 static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
84 static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
85 static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
86 static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
87 static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
88 static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
89 static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
90 static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
91 static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
92 				enum bfa_ioc_event_e event);
93 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
94 static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
95 static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
96 static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
97 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
98 
99 
100 /*
101  * IOC state machine definitions/declarations
102  */
103 enum ioc_event {
104 	IOC_E_RESET		= 1,	/*  IOC reset request		*/
105 	IOC_E_ENABLE		= 2,	/*  IOC enable request		*/
106 	IOC_E_DISABLE		= 3,	/*  IOC disable request	*/
107 	IOC_E_DETACH		= 4,	/*  driver detach cleanup	*/
108 	IOC_E_ENABLED		= 5,	/*  f/w enabled		*/
109 	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
110 	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
111 	IOC_E_PFFAILED		= 8,	/*  failure notice by iocpf sm	*/
112 	IOC_E_HBFAIL		= 9,	/*  heartbeat failure		*/
113 	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
114 	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
115 	IOC_E_HWFAILED		= 12,	/*  PCI mapping failure notice	*/
116 	IOC_E_FWRSP_ACQ_ADDR	= 13,	/*  Acquiring address		*/
117 };
118 
119 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
120 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
121 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
122 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
123 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
124 bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
125 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
126 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
127 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
128 bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
129 bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);
130 
131 static struct bfa_sm_table_s ioc_sm_table[] = {
132 	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
133 	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
134 	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
135 	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
136 	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
137 	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
138 	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
139 	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
140 	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
141 	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
142 	{BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
143 };
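
/*
 * Typical enable path through the IOC state machine (sketch derived
 * from the handlers below):
 *
 *	uninit --RESET--> reset --ENABLE--> enabling --ENABLED-->
 *	getattr --FWRSP_GETATTR--> op
 *
 * From op, failures lead to fail_retry (when auto recovery is on)
 * or fail.
 */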
144 
145 /*
146  * IOCPF state machine definitions/declarations
147  */
148 
149 #define bfa_iocpf_timer_start(__ioc)					\
150 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
151 			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
152 #define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)
153 
154 #define bfa_iocpf_poll_timer_start(__ioc)				\
155 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
156 			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
157 
158 #define bfa_sem_timer_start(__ioc)					\
159 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
160 			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
161 #define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
162 
163 /*
164  * Forward declarations for the iocpf state machine
165  */
166 static void bfa_iocpf_timeout(void *ioc_arg);
167 static void bfa_iocpf_sem_timeout(void *ioc_arg);
168 static void bfa_iocpf_poll_timeout(void *ioc_arg);
169 
170 /*
171  * IOCPF state machine events
172  */
173 enum iocpf_event {
174 	IOCPF_E_ENABLE		= 1,	/*  IOCPF enable request	*/
175 	IOCPF_E_DISABLE		= 2,	/*  IOCPF disable request	*/
176 	IOCPF_E_STOP		= 3,	/*  stop on driver detach	*/
177 	IOCPF_E_FWREADY		= 4,	/*  f/w initialization done	*/
178 	IOCPF_E_FWRSP_ENABLE	= 5,	/*  enable f/w response	*/
179 	IOCPF_E_FWRSP_DISABLE	= 6,	/*  disable f/w response	*/
180 	IOCPF_E_FAIL		= 7,	/*  failure notice by ioc sm	*/
181 	IOCPF_E_INITFAIL	= 8,	/*  init fail notice by ioc sm	*/
182 	IOCPF_E_GETATTRFAIL	= 9,	/*  getattr fail notice by ioc sm */
183 	IOCPF_E_SEMLOCKED	= 10,	/*  h/w semaphore is locked	*/
184 	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
185 	IOCPF_E_SEM_ERROR	= 12,	/*  h/w sem mapping error	*/
186 };
187 
188 /*
189  * IOCPF states
190  */
191 enum bfa_iocpf_state {
192 	BFA_IOCPF_RESET		= 1,	/*  IOC is in reset state */
193 	BFA_IOCPF_SEMWAIT	= 2,	/*  Waiting for IOC h/w semaphore */
194 	BFA_IOCPF_HWINIT	= 3,	/*  IOC h/w is being initialized */
195 	BFA_IOCPF_READY		= 4,	/*  IOCPF is initialized */
196 	BFA_IOCPF_INITFAIL	= 5,	/*  IOCPF failed */
197 	BFA_IOCPF_FAIL		= 6,	/*  IOCPF failed */
198 	BFA_IOCPF_DISABLING	= 7,	/*  IOCPF is being disabled */
199 	BFA_IOCPF_DISABLED	= 8,	/*  IOCPF is disabled */
200 	BFA_IOCPF_FWMISMATCH	= 9,	/*  IOC f/w different from driver's */
201 };
202 
203 bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
204 bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
205 bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
206 bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
207 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
208 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
209 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
210 bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
211 						enum iocpf_event);
212 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
213 bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
214 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
215 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
216 bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
217 						enum iocpf_event);
218 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
219 
220 static struct bfa_sm_table_s iocpf_sm_table[] = {
221 	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
222 	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
223 	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
224 	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
225 	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
226 	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
227 	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
228 	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
229 	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
230 	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
231 	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
232 	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
233 	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
234 	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
235 };
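
/*
 * Typical enable path through the IOCPF state machine (sketch):
 *
 *	reset --ENABLE--> fwcheck --SEMLOCKED--> hwinit --FWREADY-->
 *	enabling --FWRSP_ENABLE--> ready
 *
 * A firmware version mismatch parks the SM in the mismatch state,
 * which retries fwcheck on every timer expiry.
 */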
236 
237 /*
238  * IOC State Machine
239  */
240 
241 /*
242  * Beginning state. IOC uninit state.
243  */
244 
245 static void
246 bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
247 {
248 }
249 
250 /*
251  * IOC is in uninit state.
252  */
253 static void
254 bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
255 {
256 	bfa_trc(ioc, event);
257 
258 	switch (event) {
259 	case IOC_E_RESET:
260 		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
261 		break;
262 
263 	default:
264 		bfa_sm_fault(ioc, event);
265 	}
266 }
267 /*
268  * Reset entry actions -- initialize state machine
269  */
270 static void
271 bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
272 {
273 	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
274 }
275 
276 /*
277  * IOC is in reset state.
278  */
279 static void
280 bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
281 {
282 	bfa_trc(ioc, event);
283 
284 	switch (event) {
285 	case IOC_E_ENABLE:
286 		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
287 		break;
288 
289 	case IOC_E_DISABLE:
290 		bfa_ioc_disable_comp(ioc);
291 		break;
292 
293 	case IOC_E_DETACH:
294 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
295 		break;
296 
297 	default:
298 		bfa_sm_fault(ioc, event);
299 	}
300 }
301 
302 
303 static void
304 bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
305 {
306 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
307 }
308 
309 /*
310  * Host IOC function is being enabled, awaiting response from firmware.
311  * Semaphore is acquired.
312  */
313 static void
314 bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
315 {
316 	bfa_trc(ioc, event);
317 
318 	switch (event) {
319 	case IOC_E_ENABLED:
320 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
321 		break;
322 
323 	case IOC_E_PFFAILED:
324 		/* !!! fall through !!! */
325 	case IOC_E_HWERROR:
326 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
327 		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
328 		if (event != IOC_E_PFFAILED)
329 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
330 		break;
331 
332 	case IOC_E_HWFAILED:
333 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
334 		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
335 		break;
336 
337 	case IOC_E_DISABLE:
338 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
339 		break;
340 
341 	case IOC_E_DETACH:
342 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
343 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
344 		break;
345 
346 	case IOC_E_ENABLE:
347 		break;
348 
349 	default:
350 		bfa_sm_fault(ioc, event);
351 	}
352 }
353 
354 
355 static void
356 bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
357 {
358 	bfa_ioc_timer_start(ioc);
359 	bfa_ioc_send_getattr(ioc);
360 }
361 
362 /*
363  * IOC configuration in progress. Timer is active.
364  */
365 static void
366 bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
367 {
368 	bfa_trc(ioc, event);
369 
370 	switch (event) {
371 	case IOC_E_FWRSP_GETATTR:
372 		bfa_ioc_timer_stop(ioc);
373 		bfa_ioc_check_attr_wwns(ioc);
374 		bfa_ioc_hb_monitor(ioc);
375 		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
376 		break;
377 
378 	case IOC_E_FWRSP_ACQ_ADDR:
379 		bfa_ioc_timer_stop(ioc);
380 		bfa_ioc_hb_monitor(ioc);
381 		bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
382 		break;
383 
384 	case IOC_E_PFFAILED:
385 	case IOC_E_HWERROR:
386 		bfa_ioc_timer_stop(ioc);
387 		/* !!! fall through !!! */
388 	case IOC_E_TIMEOUT:
389 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
390 		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
391 		if (event != IOC_E_PFFAILED)
392 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
393 		break;
394 
395 	case IOC_E_DISABLE:
396 		bfa_ioc_timer_stop(ioc);
397 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
398 		break;
399 
400 	case IOC_E_ENABLE:
401 		break;
402 
403 	default:
404 		bfa_sm_fault(ioc, event);
405 	}
406 }
407 
408 /*
409  * Acquiring address from fabric (entry function)
410  */
411 static void
412 bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
413 {
414 }
415 
416 /*
417  * Acquiring address from the fabric.
418  */
419 static void
420 bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
421 {
422 	bfa_trc(ioc, event);
423 
424 	switch (event) {
425 	case IOC_E_FWRSP_GETATTR:
426 		bfa_ioc_check_attr_wwns(ioc);
427 		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
428 		break;
429 
430 	case IOC_E_PFFAILED:
431 	case IOC_E_HWERROR:
432 		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
433 	case IOC_E_HBFAIL:
434 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
435 		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
436 		if (event != IOC_E_PFFAILED)
437 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
438 		break;
439 
440 	case IOC_E_DISABLE:
441 		bfa_hb_timer_stop(ioc);
442 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
443 		break;
444 
445 	case IOC_E_ENABLE:
446 		break;
447 
448 	default:
449 		bfa_sm_fault(ioc, event);
450 	}
451 }
452 
453 static void
454 bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
455 {
456 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
457 
458 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
459 	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
460 	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
461 }
462 
463 static void
464 bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
465 {
466 	bfa_trc(ioc, event);
467 
468 	switch (event) {
469 	case IOC_E_ENABLE:
470 		break;
471 
472 	case IOC_E_DISABLE:
473 		bfa_hb_timer_stop(ioc);
474 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
475 		break;
476 
477 	case IOC_E_PFFAILED:
478 	case IOC_E_HWERROR:
479 		bfa_hb_timer_stop(ioc);
480 		/* !!! fall through !!! */
481 	case IOC_E_HBFAIL:
482 		if (ioc->iocpf.auto_recover)
483 			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
484 		else
485 			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
486 
487 		bfa_ioc_fail_notify(ioc);
488 
489 		if (event != IOC_E_PFFAILED)
490 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
491 		break;
492 
493 	default:
494 		bfa_sm_fault(ioc, event);
495 	}
496 }
497 
498 
499 static void
500 bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
501 {
502 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
503 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
504 	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
505 }
506 
507 /*
508  * IOC is being disabled
509  */
510 static void
511 bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
512 {
513 	bfa_trc(ioc, event);
514 
515 	switch (event) {
516 	case IOC_E_DISABLED:
517 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
518 		break;
519 
520 	case IOC_E_HWERROR:
521 		/*
522 		 * No state change.  Will move to disabled state
523 		 * after iocpf sm completes failure processing and
524 		 * moves to disabled state.
525 		 */
526 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
527 		break;
528 
529 	case IOC_E_HWFAILED:
530 		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
531 		bfa_ioc_disable_comp(ioc);
532 		break;
533 
534 	default:
535 		bfa_sm_fault(ioc, event);
536 	}
537 }
538 
539 /*
540  * IOC disable completion entry.
541  */
542 static void
543 bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
544 {
545 	bfa_ioc_disable_comp(ioc);
546 }
547 
548 static void
549 bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
550 {
551 	bfa_trc(ioc, event);
552 
553 	switch (event) {
554 	case IOC_E_ENABLE:
555 		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
556 		break;
557 
558 	case IOC_E_DISABLE:
559 		ioc->cbfn->disable_cbfn(ioc->bfa);
560 		break;
561 
562 	case IOC_E_DETACH:
563 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
564 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
565 		break;
566 
567 	default:
568 		bfa_sm_fault(ioc, event);
569 	}
570 }
571 
572 
573 static void
574 bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
575 {
576 	bfa_trc(ioc, 0);
577 }
578 
579 /*
580  * Hardware initialization retry.
581  */
582 static void
583 bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
584 {
585 	bfa_trc(ioc, event);
586 
587 	switch (event) {
588 	case IOC_E_ENABLED:
589 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
590 		break;
591 
592 	case IOC_E_PFFAILED:
593 	case IOC_E_HWERROR:
594 		/*
595 		 * Initialization retry failed.
596 		 */
597 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
598 		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
599 		if (event != IOC_E_PFFAILED)
600 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
601 		break;
602 
603 	case IOC_E_HWFAILED:
604 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
605 		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
606 		break;
607 
608 	case IOC_E_ENABLE:
609 		break;
610 
611 	case IOC_E_DISABLE:
612 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
613 		break;
614 
615 	case IOC_E_DETACH:
616 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
617 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
618 		break;
619 
620 	default:
621 		bfa_sm_fault(ioc, event);
622 	}
623 }
624 
625 
626 static void
627 bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
628 {
629 	bfa_trc(ioc, 0);
630 }
631 
632 /*
633  * IOC failure.
634  */
635 static void
636 bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
637 {
638 	bfa_trc(ioc, event);
639 
640 	switch (event) {
641 
642 	case IOC_E_ENABLE:
643 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
644 		break;
645 
646 	case IOC_E_DISABLE:
647 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
648 		break;
649 
650 	case IOC_E_DETACH:
651 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
652 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
653 		break;
654 
655 	case IOC_E_HWERROR:
656 		/*
657 		 * HB failure notification, ignore.
658 		 */
659 		break;
660 	default:
661 		bfa_sm_fault(ioc, event);
662 	}
663 }
664 
665 static void
666 bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
667 {
668 	bfa_trc(ioc, 0);
669 }
670 
671 static void
672 bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
673 {
674 	bfa_trc(ioc, event);
675 
676 	switch (event) {
677 	case IOC_E_ENABLE:
678 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
679 		break;
680 
681 	case IOC_E_DISABLE:
682 		ioc->cbfn->disable_cbfn(ioc->bfa);
683 		break;
684 
685 	case IOC_E_DETACH:
686 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
687 		break;
688 
689 	default:
690 		bfa_sm_fault(ioc, event);
691 	}
692 }
693 
694 /*
695  * IOCPF State Machine
696  */
697 
698 /*
699  * Reset entry actions -- initialize state machine
700  */
701 static void
702 bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
703 {
704 	iocpf->fw_mismatch_notified = BFA_FALSE;
705 	iocpf->auto_recover = bfa_auto_recover;
706 }
707 
708 /*
709  * Beginning state. IOC is in reset state.
710  */
711 static void
712 bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
713 {
714 	struct bfa_ioc_s *ioc = iocpf->ioc;
715 
716 	bfa_trc(ioc, event);
717 
718 	switch (event) {
719 	case IOCPF_E_ENABLE:
720 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
721 		break;
722 
723 	case IOCPF_E_STOP:
724 		break;
725 
726 	default:
727 		bfa_sm_fault(ioc, event);
728 	}
729 }
730 
731 /*
732  * Semaphore should be acquired for version check.
733  */
734 static void
735 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
736 {
737 	struct bfi_ioc_image_hdr_s	fwhdr;
738 	u32	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
739 
740 	/* h/w sem init */
741 	if (fwstate == BFI_IOC_UNINIT)
742 		goto sem_get;
743 
744 	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
745 
746 	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
747 		goto sem_get;
748 
749 	bfa_trc(iocpf->ioc, fwstate);
750 	bfa_trc(iocpf->ioc, fwhdr.exec);
751 	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
752 
753 	/*
754 	 * Try to lock and then unlock the semaphore.
755 	 */
756 	readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
757 	writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
758 sem_get:
759 	bfa_ioc_hw_sem_get(iocpf->ioc);
760 }
761 
762 /*
763  * Awaiting h/w semaphore to continue with version check.
764  */
765 static void
766 bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
767 {
768 	struct bfa_ioc_s *ioc = iocpf->ioc;
769 
770 	bfa_trc(ioc, event);
771 
772 	switch (event) {
773 	case IOCPF_E_SEMLOCKED:
774 		if (bfa_ioc_firmware_lock(ioc)) {
775 			if (bfa_ioc_sync_start(ioc)) {
776 				bfa_ioc_sync_join(ioc);
777 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
778 			} else {
779 				bfa_ioc_firmware_unlock(ioc);
780 				writel(1, ioc->ioc_regs.ioc_sem_reg);
781 				bfa_sem_timer_start(ioc);
782 			}
783 		} else {
784 			writel(1, ioc->ioc_regs.ioc_sem_reg);
785 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
786 		}
787 		break;
788 
789 	case IOCPF_E_SEM_ERROR:
790 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
791 		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
792 		break;
793 
794 	case IOCPF_E_DISABLE:
795 		bfa_sem_timer_stop(ioc);
796 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
797 		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
798 		break;
799 
800 	case IOCPF_E_STOP:
801 		bfa_sem_timer_stop(ioc);
802 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
803 		break;
804 
805 	default:
806 		bfa_sm_fault(ioc, event);
807 	}
808 }
809 
810 /*
811  * Notify enable completion callback.
812  */
813 static void
814 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
815 {
816 	/*
817 	 * Call only the first time sm enters fwmismatch state.
818 	 */
819 	if (iocpf->fw_mismatch_notified == BFA_FALSE)
820 		bfa_ioc_pf_fwmismatch(iocpf->ioc);
821 
822 	iocpf->fw_mismatch_notified = BFA_TRUE;
823 	bfa_iocpf_timer_start(iocpf->ioc);
824 }
825 
826 /*
827  * Awaiting firmware version match.
828  */
829 static void
830 bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
831 {
832 	struct bfa_ioc_s *ioc = iocpf->ioc;
833 
834 	bfa_trc(ioc, event);
835 
836 	switch (event) {
837 	case IOCPF_E_TIMEOUT:
838 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
839 		break;
840 
841 	case IOCPF_E_DISABLE:
842 		bfa_iocpf_timer_stop(ioc);
843 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
844 		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
845 		break;
846 
847 	case IOCPF_E_STOP:
848 		bfa_iocpf_timer_stop(ioc);
849 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
850 		break;
851 
852 	default:
853 		bfa_sm_fault(ioc, event);
854 	}
855 }
856 
857 /*
858  * Request for semaphore.
859  */
860 static void
861 bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
862 {
863 	bfa_ioc_hw_sem_get(iocpf->ioc);
864 }
865 
866 /*
867  * Awaiting semaphore for h/w initialization.
868  */
869 static void
870 bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
871 {
872 	struct bfa_ioc_s *ioc = iocpf->ioc;
873 
874 	bfa_trc(ioc, event);
875 
876 	switch (event) {
877 	case IOCPF_E_SEMLOCKED:
878 		if (bfa_ioc_sync_complete(ioc)) {
879 			bfa_ioc_sync_join(ioc);
880 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
881 		} else {
882 			writel(1, ioc->ioc_regs.ioc_sem_reg);
883 			bfa_sem_timer_start(ioc);
884 		}
885 		break;
886 
887 	case IOCPF_E_SEM_ERROR:
888 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
889 		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
890 		break;
891 
892 	case IOCPF_E_DISABLE:
893 		bfa_sem_timer_stop(ioc);
894 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
895 		break;
896 
897 	default:
898 		bfa_sm_fault(ioc, event);
899 	}
900 }
901 
902 static void
903 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
904 {
905 	iocpf->poll_time = 0;
906 	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
907 }
908 
909 /*
910  * Hardware is being initialized. Interrupts are enabled.
911  * Holding hardware semaphore lock.
912  */
913 static void
914 bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
915 {
916 	struct bfa_ioc_s *ioc = iocpf->ioc;
917 
918 	bfa_trc(ioc, event);
919 
920 	switch (event) {
921 	case IOCPF_E_FWREADY:
922 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
923 		break;
924 
925 	case IOCPF_E_TIMEOUT:
926 		writel(1, ioc->ioc_regs.ioc_sem_reg);
927 		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
928 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
929 		break;
930 
931 	case IOCPF_E_DISABLE:
932 		bfa_iocpf_timer_stop(ioc);
933 		bfa_ioc_sync_leave(ioc);
934 		writel(1, ioc->ioc_regs.ioc_sem_reg);
935 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
936 		break;
937 
938 	default:
939 		bfa_sm_fault(ioc, event);
940 	}
941 }
942 
943 static void
944 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
945 {
946 	bfa_iocpf_timer_start(iocpf->ioc);
947 	/*
948 	 * Enable Interrupts before sending fw IOC ENABLE cmd.
949 	 */
950 	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
951 	bfa_ioc_send_enable(iocpf->ioc);
952 }
953 
954 /*
955  * Host IOC function is being enabled, awaiting response from firmware.
956  * Semaphore is acquired.
957  */
958 static void
959 bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
960 {
961 	struct bfa_ioc_s *ioc = iocpf->ioc;
962 
963 	bfa_trc(ioc, event);
964 
965 	switch (event) {
966 	case IOCPF_E_FWRSP_ENABLE:
967 		bfa_iocpf_timer_stop(ioc);
968 		writel(1, ioc->ioc_regs.ioc_sem_reg);
969 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
970 		break;
971 
972 	case IOCPF_E_INITFAIL:
973 		bfa_iocpf_timer_stop(ioc);
974 		/*
975 		 * !!! fall through !!!
976 		 */
977 
978 	case IOCPF_E_TIMEOUT:
979 		writel(1, ioc->ioc_regs.ioc_sem_reg);
980 		if (event == IOCPF_E_TIMEOUT)
981 			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
982 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
983 		break;
984 
985 	case IOCPF_E_DISABLE:
986 		bfa_iocpf_timer_stop(ioc);
987 		writel(1, ioc->ioc_regs.ioc_sem_reg);
988 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
989 		break;
990 
991 	default:
992 		bfa_sm_fault(ioc, event);
993 	}
994 }
995 
996 static void
997 bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
998 {
999 	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
1000 }
1001 
1002 static void
1003 bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1004 {
1005 	struct bfa_ioc_s *ioc = iocpf->ioc;
1006 
1007 	bfa_trc(ioc, event);
1008 
1009 	switch (event) {
1010 	case IOCPF_E_DISABLE:
1011 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
1012 		break;
1013 
1014 	case IOCPF_E_GETATTRFAIL:
1015 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
1016 		break;
1017 
1018 	case IOCPF_E_FAIL:
1019 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
1020 		break;
1021 
1022 	default:
1023 		bfa_sm_fault(ioc, event);
1024 	}
1025 }
1026 
1027 static void
1028 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
1029 {
1030 	bfa_iocpf_timer_start(iocpf->ioc);
1031 	bfa_ioc_send_disable(iocpf->ioc);
1032 }
1033 
1034 /*
1035  * IOC is being disabled
1036  */
1037 static void
1038 bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1039 {
1040 	struct bfa_ioc_s *ioc = iocpf->ioc;
1041 
1042 	bfa_trc(ioc, event);
1043 
1044 	switch (event) {
1045 	case IOCPF_E_FWRSP_DISABLE:
1046 		bfa_iocpf_timer_stop(ioc);
1047 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1048 		break;
1049 
1050 	case IOCPF_E_FAIL:
1051 		bfa_iocpf_timer_stop(ioc);
1052 		/*
1053 		 * !!! fall through !!!
1054 		 */
1055 
1056 	case IOCPF_E_TIMEOUT:
1057 		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1058 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1059 		break;
1060 
1061 	case IOCPF_E_FWRSP_ENABLE:
1062 		break;
1063 
1064 	default:
1065 		bfa_sm_fault(ioc, event);
1066 	}
1067 }
1068 
1069 static void
1070 bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
1071 {
1072 	bfa_ioc_hw_sem_get(iocpf->ioc);
1073 }
1074 
1075 /*
1076  * Awaiting h/w semaphore to leave inter-function sync before disable completes.
1077  */
1078 static void
1079 bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1080 {
1081 	struct bfa_ioc_s *ioc = iocpf->ioc;
1082 
1083 	bfa_trc(ioc, event);
1084 
1085 	switch (event) {
1086 	case IOCPF_E_SEMLOCKED:
1087 		bfa_ioc_sync_leave(ioc);
1088 		writel(1, ioc->ioc_regs.ioc_sem_reg);
1089 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1090 		break;
1091 
1092 	case IOCPF_E_SEM_ERROR:
1093 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1094 		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1095 		break;
1096 
1097 	case IOCPF_E_FAIL:
1098 		break;
1099 
1100 	default:
1101 		bfa_sm_fault(ioc, event);
1102 	}
1103 }
1104 
1105 /*
1106  * IOC disable completion entry.
1107  */
1108 static void
1109 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
1110 {
1111 	bfa_ioc_mbox_flush(iocpf->ioc);
1112 	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
1113 }
1114 
1115 static void
1116 bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1117 {
1118 	struct bfa_ioc_s *ioc = iocpf->ioc;
1119 
1120 	bfa_trc(ioc, event);
1121 
1122 	switch (event) {
1123 	case IOCPF_E_ENABLE:
1124 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1125 		break;
1126 
1127 	case IOCPF_E_STOP:
1128 		bfa_ioc_firmware_unlock(ioc);
1129 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1130 		break;
1131 
1132 	default:
1133 		bfa_sm_fault(ioc, event);
1134 	}
1135 }
1136 
1137 static void
1138 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
1139 {
1140 	bfa_ioc_debug_save_ftrc(iocpf->ioc);
1141 	bfa_ioc_hw_sem_get(iocpf->ioc);
1142 }
1143 
1144 /*
1145  * Hardware initialization failed.
1146  */
1147 static void
1148 bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1149 {
1150 	struct bfa_ioc_s *ioc = iocpf->ioc;
1151 
1152 	bfa_trc(ioc, event);
1153 
1154 	switch (event) {
1155 	case IOCPF_E_SEMLOCKED:
1156 		bfa_ioc_notify_fail(ioc);
1157 		bfa_ioc_sync_leave(ioc);
1158 		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1159 		writel(1, ioc->ioc_regs.ioc_sem_reg);
1160 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
1161 		break;
1162 
1163 	case IOCPF_E_SEM_ERROR:
1164 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1165 		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1166 		break;
1167 
1168 	case IOCPF_E_DISABLE:
1169 		bfa_sem_timer_stop(ioc);
1170 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1171 		break;
1172 
1173 	case IOCPF_E_STOP:
1174 		bfa_sem_timer_stop(ioc);
1175 		bfa_ioc_firmware_unlock(ioc);
1176 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1177 		break;
1178 
1179 	case IOCPF_E_FAIL:
1180 		break;
1181 
1182 	default:
1183 		bfa_sm_fault(ioc, event);
1184 	}
1185 }
1186 
1187 static void
1188 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
1189 {
1190 	bfa_trc(iocpf->ioc, 0);
1191 }
1192 
1193 /*
1194  * Hardware initialization failed.
1195  */
1196 static void
1197 bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1198 {
1199 	struct bfa_ioc_s *ioc = iocpf->ioc;
1200 
1201 	bfa_trc(ioc, event);
1202 
1203 	switch (event) {
1204 	case IOCPF_E_DISABLE:
1205 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1206 		break;
1207 
1208 	case IOCPF_E_STOP:
1209 		bfa_ioc_firmware_unlock(ioc);
1210 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1211 		break;
1212 
1213 	default:
1214 		bfa_sm_fault(ioc, event);
1215 	}
1216 }
1217 
1218 static void
1219 bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
1220 {
1221 	/*
1222 	 * Mark IOC as failed in hardware and stop firmware.
1223 	 */
1224 	bfa_ioc_lpu_stop(iocpf->ioc);
1225 
1226 	/*
1227 	 * Flush any queued up mailbox requests.
1228 	 */
1229 	bfa_ioc_mbox_flush(iocpf->ioc);
1230 
1231 	bfa_ioc_hw_sem_get(iocpf->ioc);
1232 }
1233 
1234 static void
1235 bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1236 {
1237 	struct bfa_ioc_s *ioc = iocpf->ioc;
1238 
1239 	bfa_trc(ioc, event);
1240 
1241 	switch (event) {
1242 	case IOCPF_E_SEMLOCKED:
1243 		bfa_ioc_sync_ack(ioc);
1244 		bfa_ioc_notify_fail(ioc);
1245 		if (!iocpf->auto_recover) {
1246 			bfa_ioc_sync_leave(ioc);
1247 			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1248 			writel(1, ioc->ioc_regs.ioc_sem_reg);
1249 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1250 		} else {
1251 			if (bfa_ioc_sync_complete(ioc))
1252 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1253 			else {
1254 				writel(1, ioc->ioc_regs.ioc_sem_reg);
1255 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1256 			}
1257 		}
1258 		break;
1259 
1260 	case IOCPF_E_SEM_ERROR:
1261 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1262 		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1263 		break;
1264 
1265 	case IOCPF_E_DISABLE:
1266 		bfa_sem_timer_stop(ioc);
1267 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1268 		break;
1269 
1270 	case IOCPF_E_FAIL:
1271 		break;
1272 
1273 	default:
1274 		bfa_sm_fault(ioc, event);
1275 	}
1276 }
1277 
1278 static void
1279 bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
1280 {
1281 	bfa_trc(iocpf->ioc, 0);
1282 }
1283 
1284 /*
1285  * IOC is in failed state.
1286  */
1287 static void
1288 bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1289 {
1290 	struct bfa_ioc_s *ioc = iocpf->ioc;
1291 
1292 	bfa_trc(ioc, event);
1293 
1294 	switch (event) {
1295 	case IOCPF_E_DISABLE:
1296 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1297 		break;
1298 
1299 	default:
1300 		bfa_sm_fault(ioc, event);
1301 	}
1302 }
1303 
1304 /*
1305  *  BFA IOC private functions
1306  */
1307 
1308 /*
1309  * Notify common modules registered for notification.
1310  */
1311 static void
1312 bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
1313 {
1314 	struct bfa_ioc_notify_s	*notify;
1315 	struct list_head	*qe;
1316 
1317 	list_for_each(qe, &ioc->notify_q) {
1318 		notify = (struct bfa_ioc_notify_s *)qe;
1319 		notify->cbfn(notify->cbarg, event);
1320 	}
1321 }
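
/*
 * Registration sketch (hypothetical caller; see bfa_ioc.h for the
 * exact field names of struct bfa_ioc_notify_s):
 *
 *	struct bfa_ioc_notify_s notify;
 *
 *	notify.cbfn  = my_event_cb;	(assumed callback)
 *	notify.cbarg = my_ctx;
 *	list_add_tail(&notify.qe, &ioc->notify_q);
 */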
1322 
1323 static void
1324 bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
1325 {
1326 	ioc->cbfn->disable_cbfn(ioc->bfa);
1327 	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
1328 }
1329 
1330 bfa_boolean_t
1331 bfa_ioc_sem_get(void __iomem *sem_reg)
1332 {
1333 	u32 r32;
1334 	int cnt = 0;
1335 #define BFA_SEM_SPINCNT	3000
1336 
1337 	r32 = readl(sem_reg);
1338 
1339 	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1340 		cnt++;
1341 		udelay(2);
1342 		r32 = readl(sem_reg);
1343 	}
1344 
1345 	if (!(r32 & 1))
1346 		return BFA_TRUE;
1347 
1348 	return BFA_FALSE;
1349 }
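
/*
 * Note: the loop above busy-waits at most BFA_SEM_SPINCNT iterations of
 * 2 usecs each (3000 * 2 us ~= 6 ms) before giving up on the semaphore.
 */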
1350 
1351 static void
1352 bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1353 {
1354 	u32	r32;
1355 
1356 	/*
1357 	 * The first read of the semaphore register returns 0 (acquired);
1358 	 * subsequent reads return 1. It is released by writing 1 to it.
1359 	 */
1360 	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1361 	if (r32 == ~0) {
1362 		WARN_ON(1);
1363 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1364 		return;
1365 	}
1366 	if (!(r32 & 1)) {
1367 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1368 		return;
1369 	}
1370 
1371 	bfa_sem_timer_start(ioc);
1372 }
1373 
1374 /*
1375  * Initialize LPU local memory (aka secondary memory / SRAM)
1376  */
1377 static void
1378 bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1379 {
1380 	u32	pss_ctl;
1381 	int		i;
1382 #define PSS_LMEM_INIT_TIME  10000
1383 
1384 	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1385 	pss_ctl &= ~__PSS_LMEM_RESET;
1386 	pss_ctl |= __PSS_LMEM_INIT_EN;
1387 
1388 	/*
1389 	 * i2c workaround 12.5khz clock
1390 	 */
1391 	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1392 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1393 
1394 	/*
1395 	 * wait for memory initialization to be complete
1396 	 */
1397 	i = 0;
1398 	do {
1399 		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1400 		i++;
1401 	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1402 
1403 	/*
1404 	 * If memory initialization is not successful, IOC timeout will catch
1405 	 * such failures.
1406 	 */
1407 	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1408 	bfa_trc(ioc, pss_ctl);
1409 
1410 	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1411 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1412 }
1413 
1414 static void
1415 bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1416 {
1417 	u32	pss_ctl;
1418 
1419 	/*
1420 	 * Take processor out of reset.
1421 	 */
1422 	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1423 	pss_ctl &= ~__PSS_LPU0_RESET;
1424 
1425 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1426 }
1427 
1428 static void
1429 bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1430 {
1431 	u32	pss_ctl;
1432 
1433 	/*
1434 	 * Put processors in reset.
1435 	 */
1436 	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1437 	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1438 
1439 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1440 }
1441 
1442 /*
1443  * Read the firmware image header (version info) from IOC shared memory.
1444  */
1445 void
1446 bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1447 {
1448 	u32	pgnum, pgoff;
1449 	u32	loff = 0;
1450 	int		i;
1451 	u32	*fwsig = (u32 *) fwhdr;
1452 
1453 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1454 	pgoff = PSS_SMEM_PGOFF(loff);
1455 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1456 
1457 	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
1458 	     i++) {
1459 		fwsig[i] =
1460 			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1461 		loff += sizeof(u32);
1462 	}
1463 }
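
/*
 * SMEM paged-access sketch (as used above and by the smem read/clear
 * helpers below): a flat SMEM offset is split into a page number that
 * is programmed into host_page_num_fn and an offset within that page:
 *
 *	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, off);
 *	loff  = PSS_SMEM_PGOFF(off);
 */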
1464 
1465 /*
1466  * Returns BFA_TRUE if the firmware md5 signatures match.
1467  */
1468 bfa_boolean_t
1469 bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1470 {
1471 	struct bfi_ioc_image_hdr_s *drv_fwhdr;
1472 	int i;
1473 
1474 	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1475 		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1476 
1477 	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1478 		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
1479 			bfa_trc(ioc, i);
1480 			bfa_trc(ioc, fwhdr->md5sum[i]);
1481 			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
1482 			return BFA_FALSE;
1483 		}
1484 	}
1485 
1486 	bfa_trc(ioc, fwhdr->md5sum[0]);
1487 	return BFA_TRUE;
1488 }
1489 
1490 /*
1491  * Return true if current running version is valid. Firmware signature and
1492  * execution context (driver/bios) must match.
1493  */
1494 static bfa_boolean_t
1495 bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1496 {
1497 	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
1498 
1499 	bfa_ioc_fwver_get(ioc, &fwhdr);
1500 	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1501 		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1502 
1503 	if (fwhdr.signature != drv_fwhdr->signature) {
1504 		bfa_trc(ioc, fwhdr.signature);
1505 		bfa_trc(ioc, drv_fwhdr->signature);
1506 		return BFA_FALSE;
1507 	}
1508 
1509 	if (swab32(fwhdr.bootenv) != boot_env) {
1510 		bfa_trc(ioc, fwhdr.bootenv);
1511 		bfa_trc(ioc, boot_env);
1512 		return BFA_FALSE;
1513 	}
1514 
1515 	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
1516 }
1517 
1518 /*
1519  * Conditionally flush any pending message from firmware at start.
1520  */
1521 static void
1522 bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1523 {
1524 	u32	r32;
1525 
1526 	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1527 	if (r32)
1528 		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1529 }
1530 
1531 static void
1532 bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1533 {
1534 	enum bfi_ioc_state ioc_fwstate;
1535 	bfa_boolean_t fwvalid;
1536 	u32 boot_type;
1537 	u32 boot_env;
1538 
1539 	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1540 
1541 	if (force)
1542 		ioc_fwstate = BFI_IOC_UNINIT;
1543 
1544 	bfa_trc(ioc, ioc_fwstate);
1545 
1546 	boot_type = BFI_FWBOOT_TYPE_NORMAL;
1547 	boot_env = BFI_FWBOOT_ENV_OS;
1548 
1549 	/*
1550 	 * check if firmware is valid
1551 	 */
1552 	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1553 		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
1554 
1555 	if (!fwvalid) {
1556 		bfa_ioc_boot(ioc, boot_type, boot_env);
1557 		bfa_ioc_poll_fwinit(ioc);
1558 		return;
1559 	}
1560 
1561 	/*
1562 	 * If hardware initialization is in progress (initialized by other IOC),
1563 	 * just wait for an initialization completion interrupt.
1564 	 */
1565 	if (ioc_fwstate == BFI_IOC_INITING) {
1566 		bfa_ioc_poll_fwinit(ioc);
1567 		return;
1568 	}
1569 
1570 	/*
1571 	 * If IOC function is disabled and firmware version is same,
1572 	 * just re-enable IOC.
1573 	 *
1574 	 * If option rom, IOC must not be in operational state. With
1575 	 * convergence, IOC will be in operational state when 2nd driver
1576 	 * is loaded.
1577 	 */
1578 	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1579 
1580 		/*
1581 		 * When using MSI-X any pending firmware ready event should
1582 		 * be flushed. Otherwise MSI-X interrupts are not delivered.
1583 		 */
1584 		bfa_ioc_msgflush(ioc);
1585 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1586 		return;
1587 	}
1588 
1589 	/*
1590 	 * Initialize the h/w for any other states.
1591 	 */
1592 	bfa_ioc_boot(ioc, boot_type, boot_env);
1593 	bfa_ioc_poll_fwinit(ioc);
1594 }
1595 
1596 static void
1597 bfa_ioc_timeout(void *ioc_arg)
1598 {
1599 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
1600 
1601 	bfa_trc(ioc, 0);
1602 	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1603 }
1604 
1605 void
1606 bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1607 {
1608 	u32 *msgp = (u32 *) ioc_msg;
1609 	u32 i;
1610 
1611 	bfa_trc(ioc, msgp[0]);
1612 	bfa_trc(ioc, len);
1613 
1614 	WARN_ON(len > BFI_IOC_MSGLEN_MAX);
1615 
1616 	/*
1617 	 * first write msg to mailbox registers
1618 	 */
1619 	for (i = 0; i < len / sizeof(u32); i++)
1620 		writel(cpu_to_le32(msgp[i]),
1621 			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1622 
1623 	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1624 		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1625 
1626 	/*
1627 	 * write 1 to mailbox CMD to trigger LPU event
1628 	 */
1629 	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1630 	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1631 }
1632 
1633 static void
1634 bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1635 {
1636 	struct bfi_ioc_ctrl_req_s enable_req;
1637 	struct timeval tv;
1638 
1639 	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1640 		    bfa_ioc_portid(ioc));
1641 	enable_req.clscode = cpu_to_be16(ioc->clscode);
1642 	do_gettimeofday(&tv);
1643 	enable_req.tv_sec = cpu_to_be32(tv.tv_sec);
1644 	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1645 }
1646 
1647 static void
1648 bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1649 {
1650 	struct bfi_ioc_ctrl_req_s disable_req;
1651 
1652 	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1653 		    bfa_ioc_portid(ioc));
1654 	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1655 }
1656 
1657 static void
1658 bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1659 {
1660 	struct bfi_ioc_getattr_req_s	attr_req;
1661 
1662 	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1663 		    bfa_ioc_portid(ioc));
1664 	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1665 	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1666 }
1667 
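/*
 * Heartbeat check: firmware increments ioc_regs.heartbeat while alive.
 * If two reads spaced BFA_IOC_HB_TOV (500 ms) apart return the same
 * count, the IOC is presumed dead and recovery is initiated.
 */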
1668 static void
1669 bfa_ioc_hb_check(void *cbarg)
1670 {
1671 	struct bfa_ioc_s  *ioc = cbarg;
1672 	u32	hb_count;
1673 
1674 	hb_count = readl(ioc->ioc_regs.heartbeat);
1675 	if (ioc->hb_count == hb_count) {
1676 		bfa_ioc_recover(ioc);
1677 		return;
1678 	}
1679 	ioc->hb_count = hb_count;
1681 
1682 	bfa_ioc_mbox_poll(ioc);
1683 	bfa_hb_timer_start(ioc);
1684 }
1685 
1686 static void
1687 bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1688 {
1689 	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1690 	bfa_hb_timer_start(ioc);
1691 }
1692 
1693 /*
1694  *	Initiate a full firmware download.
1695  */
1696 static void
1697 bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1698 		    u32 boot_env)
1699 {
1700 	u32 *fwimg;
1701 	u32 pgnum, pgoff;
1702 	u32 loff = 0;
1703 	u32 chunkno = 0;
1704 	u32 i;
1705 	u32 asicmode;
1706 
1707 	/*
1708 	 * Initialize LMEM first before code download
1709 	 */
1710 	bfa_ioc_lmem_init(ioc);
1711 
1712 	bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
1713 	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1714 
1715 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1716 	pgoff = PSS_SMEM_PGOFF(loff);
1717 
1718 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1719 
1720 	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
1721 
1722 		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1723 			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1724 			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1725 					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1726 		}
1727 
1728 		/*
1729 		 * write smem
1730 		 */
1731 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1732 			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
1733 
1734 		loff += sizeof(u32);
1735 
1736 		/*
1737 		 * handle page offset wrap around
1738 		 */
1739 		loff = PSS_SMEM_PGOFF(loff);
1740 		if (loff == 0) {
1741 			pgnum++;
1742 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1743 		}
1744 	}
1745 
1746 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1747 			ioc->ioc_regs.host_page_num_fn);
1748 
1749 	/*
1750 	 * Set boot type and device mode at the end.
1751 	 */
1752 	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1753 				ioc->port0_mode, ioc->port1_mode);
1754 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1755 			swab32(asicmode));
1756 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
1757 			swab32(boot_type));
1758 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
1759 			swab32(boot_env));
1760 }
1761 
1762 
1763 /*
1764  * Update BFA configuration from firmware configuration.
1765  */
1766 static void
1767 bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1768 {
1769 	struct bfi_ioc_attr_s	*attr = ioc->attr;
1770 
1771 	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
1772 	attr->card_type     = be32_to_cpu(attr->card_type);
1773 	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
1774 	ioc->fcmode	= (attr->port_mode == BFI_PORT_MODE_FC);
1775 
1776 	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1777 }
1778 
1779 /*
1780  * Attach time initialization of mbox logic.
1781  */
1782 static void
1783 bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1784 {
1785 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1786 	int	mc;
1787 
1788 	INIT_LIST_HEAD(&mod->cmd_q);
1789 	for (mc = 0; mc < BFI_MC_MAX; mc++) {
1790 		mod->mbhdlr[mc].cbfn = NULL;
1791 		mod->mbhdlr[mc].cbarg = ioc->bfa;
1792 	}
1793 }
1794 
1795 /*
1796  * Mbox poll timer -- restarts any pending mailbox requests.
1797  */
1798 static void
1799 bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1800 {
1801 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1802 	struct bfa_mbox_cmd_s		*cmd;
1803 	u32			stat;
1804 
1805 	/*
1806 	 * If no command pending, do nothing
1807 	 */
1808 	if (list_empty(&mod->cmd_q))
1809 		return;
1810 
1811 	/*
1812 	 * If previous command is not yet fetched by firmware, do nothing
1813 	 */
1814 	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1815 	if (stat)
1816 		return;
1817 
1818 	/*
1819 	 * Enqueue command to firmware.
1820 	 */
1821 	bfa_q_deq(&mod->cmd_q, &cmd);
1822 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1823 }
1824 
1825 /*
1826  * Cleanup any pending requests.
1827  */
1828 static void
1829 bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
1830 {
1831 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1832 	struct bfa_mbox_cmd_s		*cmd;
1833 
1834 	while (!list_empty(&mod->cmd_q))
1835 		bfa_q_deq(&mod->cmd_q, &cmd);
1836 }
1837 
1838 /*
1839  * Read data from SMEM to host through PCI memmap
1840  *
1841  * @param[in]	ioc	memory for IOC
1842  * @param[in]	tbuf	app memory to store data from smem
1843  * @param[in]	soff	smem offset
1844  * @param[in]	sz	size of smem in bytes
1845  */
1846 static bfa_status_t
1847 bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1848 {
1849 	u32 pgnum, loff;
1850 	__be32 r32;
1851 	int i, len;
1852 	u32 *buf = tbuf;
1853 
1854 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1855 	loff = PSS_SMEM_PGOFF(soff);
1856 	bfa_trc(ioc, pgnum);
1857 	bfa_trc(ioc, loff);
1858 	bfa_trc(ioc, sz);
1859 
1860 	/*
1861 	 *  Hold semaphore to serialize pll init and fwtrc.
1862 	 */
1863 	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1864 		bfa_trc(ioc, 0);
1865 		return BFA_STATUS_FAILED;
1866 	}
1867 
1868 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1869 
1870 	len = sz/sizeof(u32);
1871 	bfa_trc(ioc, len);
1872 	for (i = 0; i < len; i++) {
1873 		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1874 		buf[i] = be32_to_cpu(r32);
1875 		loff += sizeof(u32);
1876 
1877 		/*
1878 		 * handle page offset wrap around
1879 		 */
1880 		loff = PSS_SMEM_PGOFF(loff);
1881 		if (loff == 0) {
1882 			pgnum++;
1883 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1884 		}
1885 	}
1886 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1887 			ioc->ioc_regs.host_page_num_fn);
1888 	/*
1889 	 *  release semaphore.
1890 	 */
1891 	readl(ioc->ioc_regs.ioc_init_sem_reg);
1892 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1893 
1894 	bfa_trc(ioc, pgnum);
1895 	return BFA_STATUS_OK;
1896 }
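
/*
 * Usage sketch (illustrative offset/size): copy 64 bytes of the saved
 * firmware trace of PCI function 0 into a local buffer:
 *
 *	u32 buf[16];
 *
 *	if (bfa_ioc_smem_read(ioc, buf, BFA_DBG_FWTRC_OFF(0),
 *			      sizeof(buf)) != BFA_STATUS_OK)
 *		return;
 */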
1897 
1898 /*
1899  * Clear SMEM data from host through PCI memmap
1900  *
1901  * @param[in]	ioc	memory for IOC
1902  * @param[in]	soff	smem offset
1903  * @param[in]	sz	size of smem in bytes
1904  */
1905 static bfa_status_t
1906 bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1907 {
1908 	int i, len;
1909 	u32 pgnum, loff;
1910 
1911 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1912 	loff = PSS_SMEM_PGOFF(soff);
1913 	bfa_trc(ioc, pgnum);
1914 	bfa_trc(ioc, loff);
1915 	bfa_trc(ioc, sz);
1916 
1917 	/*
1918 	 *  Hold semaphore to serialize pll init and fwtrc.
1919 	 */
1920 	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1921 		bfa_trc(ioc, 0);
1922 		return BFA_STATUS_FAILED;
1923 	}
1924 
1925 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1926 
1927 	len = sz/sizeof(u32); /* len in words */
1928 	bfa_trc(ioc, len);
1929 	for (i = 0; i < len; i++) {
1930 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1931 		loff += sizeof(u32);
1932 
1933 		/*
1934 		 * handle page offset wrap around
1935 		 */
1936 		loff = PSS_SMEM_PGOFF(loff);
1937 		if (loff == 0) {
1938 			pgnum++;
1939 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1940 		}
1941 	}
1942 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1943 			ioc->ioc_regs.host_page_num_fn);
1944 
1945 	/*
1946 	 *  release semaphore.
1947 	 */
1948 	readl(ioc->ioc_regs.ioc_init_sem_reg);
1949 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1950 	bfa_trc(ioc, pgnum);
1951 	return BFA_STATUS_OK;
1952 }
1953 
1954 static void
1955 bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1956 {
1957 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1958 
1959 	/*
1960 	 * Notify driver and common modules registered for notification.
1961 	 */
1962 	ioc->cbfn->hbfail_cbfn(ioc->bfa);
1963 	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
1964 
1965 	bfa_ioc_debug_save_ftrc(ioc);
1966 
1967 	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1968 		"Heart Beat of IOC has failed\n");
1969 
1970 }
1971 
1972 static void
1973 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1974 {
1975 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1976 	/*
1977 	 * Provide enable completion callback.
1978 	 */
1979 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1980 	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
1981 		"Running firmware version is incompatible "
1982 		"with the driver version\n");
1983 }
1984 
1985 bfa_status_t
1986 bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1987 {
1988 
1989 	/*
1990 	 *  Hold semaphore so that nobody can access the chip during init.
1991 	 */
1992 	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1993 
1994 	bfa_ioc_pll_init_asic(ioc);
1995 
1996 	ioc->pllinit = BFA_TRUE;
1997 	/*
1998 	 *  release semaphore.
1999 	 */
2000 	readl(ioc->ioc_regs.ioc_init_sem_reg);
2001 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2002 
2003 	return BFA_STATUS_OK;
2004 }
2005 
2006 /*
2007  * Interface used by diag module to do firmware boot with memory test
2008  * as the entry vector.
2009  */
2010 void
2011 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
2012 {
2013 	bfa_ioc_stats(ioc, ioc_boots);
2014 
2015 	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2016 		return;
2017 
2018 	/*
2019 	 * Initialize IOC state of all functions on a chip reset.
2020 	 */
2021 	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2022 		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
2023 		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
2024 	} else {
2025 		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
2026 		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
2027 	}
2028 
2029 	bfa_ioc_msgflush(ioc);
2030 	bfa_ioc_download_fw(ioc, boot_type, boot_env);
2031 	bfa_ioc_lpu_start(ioc);
2032 }
2033 
2034 /*
2035  * Enable/disable IOC failure auto recovery.
2036  */
2037 void
2038 bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2039 {
2040 	bfa_auto_recover = auto_recover;
2041 }
2042 
2043 
2044 
2045 bfa_boolean_t
2046 bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2047 {
2048 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2049 }
2050 
2051 bfa_boolean_t
2052 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2053 {
2054 	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
2055 
2056 	return ((r32 != BFI_IOC_UNINIT) &&
2057 		(r32 != BFI_IOC_INITING) &&
2058 		(r32 != BFI_IOC_MEMTEST));
2059 }
2060 
2061 bfa_boolean_t
2062 bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2063 {
2064 	__be32	*msgp = mbmsg;
2065 	u32	r32;
2066 	int		i;
2067 
2068 	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2069 	if ((r32 & 1) == 0)
2070 		return BFA_FALSE;
2071 
2072 	/*
2073 	 * read the MBOX msg
2074 	 */
2075 	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2076 	     i++) {
2077 		r32 = readl(ioc->ioc_regs.lpu_mbox +
2078 				   i * sizeof(u32));
2079 		msgp[i] = cpu_to_be32(r32);
2080 	}
2081 
2082 	/*
2083 	 * turn off mailbox interrupt by clearing mailbox status
2084 	 */
2085 	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2086 	readl(ioc->ioc_regs.lpu_mbox_cmd);
2087 
2088 	return BFA_TRUE;
2089 }
2090 
2091 void
2092 bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2093 {
2094 	union bfi_ioc_i2h_msg_u	*msg;
2095 	struct bfa_iocpf_s *iocpf = &ioc->iocpf;
2096 
2097 	msg = (union bfi_ioc_i2h_msg_u *) m;
2098 
2099 	bfa_ioc_stats(ioc, ioc_isrs);
2100 
2101 	switch (msg->mh.msg_id) {
2102 	case BFI_IOC_I2H_HBEAT:
2103 		break;
2104 
2105 	case BFI_IOC_I2H_ENABLE_REPLY:
2106 		ioc->port_mode = ioc->port_mode_cfg =
2107 				(enum bfa_mode_s)msg->fw_event.port_mode;
2108 		ioc->ad_cap_bm = msg->fw_event.cap_bm;
2109 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2110 		break;
2111 
2112 	case BFI_IOC_I2H_DISABLE_REPLY:
2113 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2114 		break;
2115 
2116 	case BFI_IOC_I2H_GETATTR_REPLY:
2117 		bfa_ioc_getattr_reply(ioc);
2118 		break;
2119 
2120 	case BFI_IOC_I2H_ACQ_ADDR_REPLY:
2121 		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
2122 		break;
2123 
2124 	default:
2125 		bfa_trc(ioc, msg->mh.msg_id);
2126 		WARN_ON(1);
2127 	}
2128 }
2129 
2130 /*
2131  * IOC attach time initialization and setup.
2132  *
2133  * @param[in]	ioc	memory for IOC
2134  * @param[in]	bfa	driver instance structure
2135  */
2136 void
2137 bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2138 	       struct bfa_timer_mod_s *timer_mod)
2139 {
2140 	ioc->bfa	= bfa;
2141 	ioc->cbfn	= cbfn;
2142 	ioc->timer_mod	= timer_mod;
2143 	ioc->fcmode	= BFA_FALSE;
2144 	ioc->pllinit	= BFA_FALSE;
2145 	ioc->dbg_fwsave_once = BFA_TRUE;
2146 	ioc->iocpf.ioc	= ioc;
2147 
2148 	bfa_ioc_mbox_attach(ioc);
2149 	INIT_LIST_HEAD(&ioc->notify_q);
2150 
2151 	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2152 	bfa_fsm_send_event(ioc, IOC_E_RESET);
2153 }
2154 
2155 /*
2156  * Driver detach time IOC cleanup.
2157  */
2158 void
2159 bfa_ioc_detach(struct bfa_ioc_s *ioc)
2160 {
2161 	bfa_fsm_send_event(ioc, IOC_E_DETACH);
2162 	INIT_LIST_HEAD(&ioc->notify_q);
2163 }
2164 
2165 /*
2166  * Setup IOC PCI properties.
2167  *
2168  * @param[in]	pcidev	PCI device information for this IOC
2169  */
2170 void
2171 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2172 		enum bfi_pcifn_class clscode)
2173 {
2174 	ioc->clscode	= clscode;
2175 	ioc->pcidev	= *pcidev;
2176 
2177 	/*
2178 	 * Initialize IOC and device personality
2179 	 */
2180 	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2181 	ioc->asic_mode  = BFI_ASIC_MODE_FC;
2182 
2183 	switch (pcidev->device_id) {
2184 	case BFA_PCI_DEVICE_ID_FC_8G1P:
2185 	case BFA_PCI_DEVICE_ID_FC_8G2P:
2186 		ioc->asic_gen = BFI_ASIC_GEN_CB;
2187 		ioc->fcmode = BFA_TRUE;
2188 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2189 		ioc->ad_cap_bm = BFA_CM_HBA;
2190 		break;
2191 
2192 	case BFA_PCI_DEVICE_ID_CT:
2193 		ioc->asic_gen = BFI_ASIC_GEN_CT;
2194 		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2195 		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2196 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2197 		ioc->ad_cap_bm = BFA_CM_CNA;
2198 		break;
2199 
2200 	case BFA_PCI_DEVICE_ID_CT_FC:
2201 		ioc->asic_gen = BFI_ASIC_GEN_CT;
2202 		ioc->fcmode = BFA_TRUE;
2203 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2204 		ioc->ad_cap_bm = BFA_CM_HBA;
2205 		break;
2206 
2207 	case BFA_PCI_DEVICE_ID_CT2:
2208 		ioc->asic_gen = BFI_ASIC_GEN_CT2;
2209 		if (clscode == BFI_PCIFN_CLASS_FC &&
2210 		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2211 			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2212 			ioc->fcmode = BFA_TRUE;
2213 			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2214 			ioc->ad_cap_bm = BFA_CM_HBA;
2215 		} else {
2216 			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2217 			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2218 			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2219 				ioc->port_mode =
2220 				ioc->port_mode_cfg = BFA_MODE_CNA;
2221 				ioc->ad_cap_bm = BFA_CM_CNA;
2222 			} else {
2223 				ioc->port_mode =
2224 				ioc->port_mode_cfg = BFA_MODE_NIC;
2225 				ioc->ad_cap_bm = BFA_CM_NIC;
2226 			}
2227 		}
2228 		break;
2229 
2230 	default:
2231 		WARN_ON(1);
2232 	}
2233 
2234 	/*
2235 	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2236 	 */
2237 	if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2238 		bfa_ioc_set_cb_hwif(ioc);
2239 	else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2240 		bfa_ioc_set_ct_hwif(ioc);
2241 	else {
2242 		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2243 		bfa_ioc_set_ct2_hwif(ioc);
2244 		bfa_ioc_ct2_poweron(ioc);
2245 	}
2246 
2247 	bfa_ioc_map_port(ioc);
2248 	bfa_ioc_reg_init(ioc);
2249 }
2250 
2251 /*
2252  * Initialize IOC dma memory
2253  *
2254  * @param[in]	dm_kva	kernel virtual address of IOC dma memory
2255  * @param[in]	dm_pa	physical address of IOC dma memory
2256  */
2257 void
2258 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
2259 {
2260 	/*
2261 	 * dma memory for firmware attribute
2262 	 */
2263 	ioc->attr_dma.kva = dm_kva;
2264 	ioc->attr_dma.pa = dm_pa;
2265 	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2266 }
2267 
2268 void
2269 bfa_ioc_enable(struct bfa_ioc_s *ioc)
2270 {
2271 	bfa_ioc_stats(ioc, ioc_enables);
2272 	ioc->dbg_fwsave_once = BFA_TRUE;
2273 
2274 	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2275 }
2276 
2277 void
2278 bfa_ioc_disable(struct bfa_ioc_s *ioc)
2279 {
2280 	bfa_ioc_stats(ioc, ioc_disables);
2281 	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2282 }
2283 
2284 
2285 /*
 * Initialize memory for saving firmware trace. The driver must
 * initialize trace memory before calling bfa_ioc_enable().
2288  */
2289 void
2290 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2291 {
2292 	ioc->dbg_fwsave	    = dbg_fwsave;
2293 	ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
2294 }
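
/*
 * Call-order sketch (illustrative only; "fwsave_buf" is a hypothetical
 * driver-owned buffer of at least BFA_DBG_FWTRC_LEN bytes):
 *
 *	bfa_ioc_debug_memclaim(ioc, fwsave_buf);
 *	bfa_ioc_enable(ioc);
 */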
2295 
2296 /*
2297  * Register mailbox message handler functions
2298  *
2299  * @param[in]	ioc		IOC instance
2300  * @param[in]	mcfuncs		message class handler functions
2301  */
2302 void
2303 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2304 {
2305 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2306 	int				mc;
2307 
2308 	for (mc = 0; mc < BFI_MC_MAX; mc++)
2309 		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2310 }
2311 
2312 /*
2313  * Register mailbox message handler function, to be called by common modules
2314  */
2315 void
2316 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2317 		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2318 {
2319 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2320 
2321 	mod->mbhdlr[mc].cbfn	= cbfn;
2322 	mod->mbhdlr[mc].cbarg	= cbarg;
2323 }
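
/*
 * Registration sketch (illustrative; "my_isr" and "my_mod" are
 * hypothetical -- see bfa_ablk_attach() below for a real example):
 *
 *	static void my_isr(void *cbarg, struct bfi_mbmsg_s *msg);
 *
 *	bfa_ioc_mbox_regisr(ioc, BFI_MC_ABLK, my_isr, my_mod);
 */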
2324 
2325 /*
 * Queue a mailbox command request to firmware. If the mailbox is busy,
 * the command is queued and sent once the mailbox frees up. It is the
 * caller's responsibility to serialize requests.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
2331  */
2332 void
2333 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2334 {
2335 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2336 	u32			stat;
2337 
2338 	/*
2339 	 * If a previous command is pending, queue new command
2340 	 */
2341 	if (!list_empty(&mod->cmd_q)) {
2342 		list_add_tail(&cmd->qe, &mod->cmd_q);
2343 		return;
2344 	}
2345 
2346 	/*
2347 	 * If mailbox is busy, queue command for poll timer
2348 	 */
2349 	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2350 	if (stat) {
2351 		list_add_tail(&cmd->qe, &mod->cmd_q);
2352 		return;
2353 	}
2354 
2355 	/*
	 * mailbox is free -- send the command to firmware
2357 	 */
2358 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2359 }
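
/*
 * Queueing sketch (illustrative; this is the same pattern the IOC uses
 * internally to send control requests, see bfa_ioc_send_fwsync() below):
 *
 *	struct bfa_mbox_cmd_s cmd;
 *	struct bfi_ioc_ctrl_req_s *req =
 *			(struct bfi_ioc_ctrl_req_s *) cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, &cmd);
 */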
2360 
2361 /*
2362  * Handle mailbox interrupts
2363  */
2364 void
2365 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2366 {
2367 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2368 	struct bfi_mbmsg_s		m;
2369 	int				mc;
2370 
2371 	if (bfa_ioc_msgget(ioc, &m)) {
2372 		/*
2373 		 * Treat IOC message class as special.
2374 		 */
2375 		mc = m.mh.msg_class;
2376 		if (mc == BFI_MC_IOC) {
2377 			bfa_ioc_isr(ioc, &m);
2378 			return;
2379 		}
2380 
		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2382 			return;
2383 
2384 		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2385 	}
2386 
2387 	bfa_ioc_lpu_read_stat(ioc);
2388 
2389 	/*
2390 	 * Try to send pending mailbox commands
2391 	 */
2392 	bfa_ioc_mbox_poll(ioc);
2393 }
2394 
2395 void
2396 bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2397 {
2398 	bfa_ioc_stats(ioc, ioc_hbfails);
2399 	ioc->stats.hb_count = ioc->hb_count;
2400 	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2401 }
2402 
2403 /*
2404  * return true if IOC is disabled
2405  */
2406 bfa_boolean_t
2407 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2408 {
2409 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2410 		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2411 }
2412 
2413 /*
2414  * Return TRUE if IOC is in acquiring address state
2415  */
2416 bfa_boolean_t
2417 bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
2418 {
2419 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
2420 }
2421 
2422 /*
 * Return TRUE if the running IOC firmware is mismatched with the
 * version the driver expects.
2424  */
2425 bfa_boolean_t
2426 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2427 {
2428 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2429 		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2430 		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2431 }
2432 
2433 #define bfa_ioc_state_disabled(__sm)		\
2434 	(((__sm) == BFI_IOC_UNINIT) ||		\
2435 	 ((__sm) == BFI_IOC_INITING) ||		\
2436 	 ((__sm) == BFI_IOC_HWINIT) ||		\
2437 	 ((__sm) == BFI_IOC_DISABLED) ||	\
2438 	 ((__sm) == BFI_IOC_FAIL) ||		\
2439 	 ((__sm) == BFI_IOC_CFG_DISABLED))
2440 
2441 /*
2442  * Check if adapter is disabled -- both IOCs should be in a disabled
2443  * state.
2444  */
2445 bfa_boolean_t
2446 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2447 {
2448 	u32	ioc_state;
2449 
2450 	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2451 		return BFA_FALSE;
2452 
2453 	ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
2454 	if (!bfa_ioc_state_disabled(ioc_state))
2455 		return BFA_FALSE;
2456 
2457 	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2458 		ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
2459 		if (!bfa_ioc_state_disabled(ioc_state))
2460 			return BFA_FALSE;
2461 	}
2462 
2463 	return BFA_TRUE;
2464 }
2465 
2466 /*
2467  * Reset IOC fwstate registers.
2468  */
2469 void
2470 bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2471 {
2472 	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
2473 	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
2474 }
2475 
2476 #define BFA_MFG_NAME "Brocade"
2477 void
2478 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2479 			 struct bfa_adapter_attr_s *ad_attr)
2480 {
2481 	struct bfi_ioc_attr_s	*ioc_attr;
2482 
2483 	ioc_attr = ioc->attr;
2484 
2485 	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2486 	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2487 	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2488 	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2489 	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2490 		      sizeof(struct bfa_mfg_vpd_s));
2491 
2492 	ad_attr->nports = bfa_ioc_get_nports(ioc);
2493 	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2494 
2495 	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2496 	/* For now, model descr uses same model string */
2497 	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2498 
2499 	ad_attr->card_type = ioc_attr->card_type;
2500 	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2501 
2502 	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2503 		ad_attr->prototype = 1;
2504 	else
2505 		ad_attr->prototype = 0;
2506 
2507 	ad_attr->pwwn = ioc->attr->pwwn;
2508 	ad_attr->mac  = bfa_ioc_get_mac(ioc);
2509 
2510 	ad_attr->pcie_gen = ioc_attr->pcie_gen;
2511 	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2512 	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2513 	ad_attr->asic_rev = ioc_attr->asic_rev;
2514 
2515 	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2516 
2517 	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2518 	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2519 				  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2520 }
2521 
2522 enum bfa_ioc_type_e
2523 bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2524 {
2525 	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2526 		return BFA_IOC_TYPE_LL;
2527 
2528 	WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2529 
2530 	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2531 		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2532 }
2533 
2534 void
2535 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2536 {
2537 	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2538 	memcpy((void *)serial_num,
2539 			(void *)ioc->attr->brcd_serialnum,
2540 			BFA_ADAPTER_SERIAL_NUM_LEN);
2541 }
2542 
2543 void
2544 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2545 {
2546 	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2547 	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2548 }
2549 
2550 void
2551 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2552 {
2553 	WARN_ON(!chip_rev);
2554 
2555 	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2556 
2557 	chip_rev[0] = 'R';
2558 	chip_rev[1] = 'e';
2559 	chip_rev[2] = 'v';
2560 	chip_rev[3] = '-';
2561 	chip_rev[4] = ioc->attr->asic_rev;
2562 	chip_rev[5] = '\0';
2563 }
2564 
2565 void
2566 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2567 {
2568 	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2569 	memcpy(optrom_ver, ioc->attr->optrom_version,
2570 		      BFA_VERSION_LEN);
2571 }
2572 
2573 void
2574 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2575 {
2576 	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2577 	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2578 }
2579 
2580 void
2581 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2582 {
2583 	struct bfi_ioc_attr_s	*ioc_attr;
2584 
2585 	WARN_ON(!model);
2586 	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2587 
2588 	ioc_attr = ioc->attr;
2589 
2590 	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2591 			BFA_MFG_NAME, ioc_attr->card_type);
2592 }
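
/*
 * For example (hypothetical card type), a card_type of 804 yields the
 * model string "Brocade-804".
 */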
2593 
2594 enum bfa_ioc_state
2595 bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2596 {
2597 	enum bfa_iocpf_state iocpf_st;
2598 	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2599 
2600 	if (ioc_st == BFA_IOC_ENABLING ||
2601 		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2602 
2603 		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2604 
2605 		switch (iocpf_st) {
2606 		case BFA_IOCPF_SEMWAIT:
2607 			ioc_st = BFA_IOC_SEMWAIT;
2608 			break;
2609 
2610 		case BFA_IOCPF_HWINIT:
2611 			ioc_st = BFA_IOC_HWINIT;
2612 			break;
2613 
2614 		case BFA_IOCPF_FWMISMATCH:
2615 			ioc_st = BFA_IOC_FWMISMATCH;
2616 			break;
2617 
2618 		case BFA_IOCPF_FAIL:
2619 			ioc_st = BFA_IOC_FAIL;
2620 			break;
2621 
2622 		case BFA_IOCPF_INITFAIL:
2623 			ioc_st = BFA_IOC_INITFAIL;
2624 			break;
2625 
2626 		default:
2627 			break;
2628 		}
2629 	}
2630 
2631 	return ioc_st;
2632 }
2633 
2634 void
2635 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2636 {
2637 	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2638 
2639 	ioc_attr->state = bfa_ioc_get_state(ioc);
2640 	ioc_attr->port_id = ioc->port_id;
2641 	ioc_attr->port_mode = ioc->port_mode;
2642 	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2643 	ioc_attr->cap_bm = ioc->ad_cap_bm;
2644 
2645 	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2646 
2647 	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2648 
2649 	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2650 	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2651 	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2652 }
2653 
2654 mac_t
2655 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2656 {
2657 	/*
2658 	 * Check the IOC type and return the appropriate MAC
2659 	 */
2660 	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2661 		return ioc->attr->fcoe_mac;
2662 	else
2663 		return ioc->attr->mac;
2664 }
2665 
2666 mac_t
2667 bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2668 {
2669 	mac_t	m;
2670 
2671 	m = ioc->attr->mfg_mac;
2672 	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2673 		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2674 	else
2675 		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2676 			bfa_ioc_pcifn(ioc));
2677 
2678 	return m;
2679 }
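
/*
 * For example (hypothetical addresses): with a manufacturing MAC of
 * 00:05:1e:00:00:10 and PCI function 2, an old WWN/MAC model adapter
 * returns 00:05:1e:00:00:12; newer models increment within the low
 * three bytes via bfa_mfg_increment_wwn_mac().
 */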
2680 
2681 /*
2682  * Retrieve saved firmware trace from a prior IOC failure.
2683  */
2684 bfa_status_t
2685 bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2686 {
2687 	int	tlen;
2688 
2689 	if (ioc->dbg_fwsave_len == 0)
2690 		return BFA_STATUS_ENOFSAVE;
2691 
2692 	tlen = *trclen;
2693 	if (tlen > ioc->dbg_fwsave_len)
2694 		tlen = ioc->dbg_fwsave_len;
2695 
2696 	memcpy(trcdata, ioc->dbg_fwsave, tlen);
2697 	*trclen = tlen;
2698 	return BFA_STATUS_OK;
2699 }
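
/*
 * Usage sketch (illustrative; "buf" is a hypothetical caller buffer of
 * at least BFA_DBG_FWTRC_LEN bytes). On return, len holds the number
 * of bytes actually copied:
 *
 *	int len = BFA_DBG_FWTRC_LEN;
 *
 *	if (bfa_ioc_debug_fwsave(ioc, buf, &len) == BFA_STATUS_OK)
 *		process_trace(buf, len);
 */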
2700 
2701 
2702 /*
2703  * Retrieve saved firmware trace from a prior IOC failure.
2704  */
2705 bfa_status_t
2706 bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2707 {
2708 	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2709 	int tlen;
2710 	bfa_status_t status;
2711 
2712 	bfa_trc(ioc, *trclen);
2713 
2714 	tlen = *trclen;
2715 	if (tlen > BFA_DBG_FWTRC_LEN)
2716 		tlen = BFA_DBG_FWTRC_LEN;
2717 
2718 	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2719 	*trclen = tlen;
2720 	return status;
2721 }
2722 
2723 static void
2724 bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2725 {
2726 	struct bfa_mbox_cmd_s cmd;
2727 	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2728 
2729 	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2730 		    bfa_ioc_portid(ioc));
2731 	req->clscode = cpu_to_be16(ioc->clscode);
2732 	bfa_ioc_mbox_queue(ioc, &cmd);
2733 }
2734 
2735 static void
2736 bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2737 {
2738 	u32 fwsync_iter = 1000;
2739 
2740 	bfa_ioc_send_fwsync(ioc);
2741 
2742 	/*
	 * After sending the fw sync mbox command, wait for it to
	 * take effect. We do not wait for a response because
	 *    1. the fw_sync mbox cmd does not have a response, and
	 *    2. even if it did, interrupts might not be enabled
	 *	 when this function is called.
	 * So, just keep checking whether any mbox cmd is pending and,
	 * after waiting a reasonable amount of time, go ahead anyway.
	 * It is possible that the fw has crashed and the mbox command
	 * is never acknowledged.
2752 	 */
2753 	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2754 		fwsync_iter--;
2755 }
2756 
2757 /*
2758  * Dump firmware smem
2759  */
2760 bfa_status_t
2761 bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2762 				u32 *offset, int *buflen)
2763 {
2764 	u32 loff;
2765 	int dlen;
2766 	bfa_status_t status;
2767 	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
2768 
2769 	if (*offset >= smem_len) {
2770 		*offset = *buflen = 0;
2771 		return BFA_STATUS_EINVAL;
2772 	}
2773 
2774 	loff = *offset;
2775 	dlen = *buflen;
2776 
2777 	/*
	 * On the first smem read, sync smem before proceeding;
	 * there is no need to sync before reading every chunk.
2780 	 */
2781 	if (loff == 0)
2782 		bfa_ioc_fwsync(ioc);
2783 
2784 	if ((loff + dlen) >= smem_len)
2785 		dlen = smem_len - loff;
2786 
2787 	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2788 
2789 	if (status != BFA_STATUS_OK) {
2790 		*offset = *buflen = 0;
2791 		return status;
2792 	}
2793 
2794 	*offset += dlen;
2795 
2796 	if (*offset >= smem_len)
2797 		*offset = 0;
2798 
2799 	*buflen = dlen;
2800 
2801 	return status;
2802 }
2803 
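
/*
 * Chunked-read sketch (illustrative; "chunk" and store() are
 * hypothetical). *offset advances on each call and wraps to 0 once the
 * whole smem region has been returned:
 *
 *	u32 off = 0;
 *	int len;
 *
 *	do {
 *		len = sizeof(chunk);
 *		if (bfa_ioc_debug_fwcore(ioc, chunk, &off, &len) !=
 *		    BFA_STATUS_OK)
 *			break;
 *		store(chunk, len);
 *	} while (off != 0);
 */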
2804 /*
2805  * Firmware statistics
2806  */
2807 bfa_status_t
2808 bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2809 {
2810 	u32 loff = BFI_IOC_FWSTATS_OFF + \
2811 		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2812 	int tlen;
2813 	bfa_status_t status;
2814 
2815 	if (ioc->stats_busy) {
2816 		bfa_trc(ioc, ioc->stats_busy);
2817 		return BFA_STATUS_DEVBUSY;
2818 	}
2819 	ioc->stats_busy = BFA_TRUE;
2820 
2821 	tlen = sizeof(struct bfa_fw_stats_s);
2822 	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2823 
2824 	ioc->stats_busy = BFA_FALSE;
2825 	return status;
2826 }
2827 
2828 bfa_status_t
2829 bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2830 {
2831 	u32 loff = BFI_IOC_FWSTATS_OFF + \
2832 		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2833 	int tlen;
2834 	bfa_status_t status;
2835 
2836 	if (ioc->stats_busy) {
2837 		bfa_trc(ioc, ioc->stats_busy);
2838 		return BFA_STATUS_DEVBUSY;
2839 	}
2840 	ioc->stats_busy = BFA_TRUE;
2841 
2842 	tlen = sizeof(struct bfa_fw_stats_s);
2843 	status = bfa_ioc_smem_clr(ioc, loff, tlen);
2844 
2845 	ioc->stats_busy = BFA_FALSE;
2846 	return status;
2847 }
2848 
2849 /*
2850  * Save firmware trace if configured.
2851  */
2852 static void
2853 bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
2854 {
2855 	int		tlen;
2856 
2857 	if (ioc->dbg_fwsave_once) {
2858 		ioc->dbg_fwsave_once = BFA_FALSE;
2859 		if (ioc->dbg_fwsave_len) {
2860 			tlen = ioc->dbg_fwsave_len;
2861 			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2862 		}
2863 	}
2864 }
2865 
2866 /*
2867  * Firmware failure detected. Start recovery actions.
2868  */
2869 static void
2870 bfa_ioc_recover(struct bfa_ioc_s *ioc)
2871 {
2872 	bfa_ioc_stats(ioc, ioc_hbfails);
2873 	ioc->stats.hb_count = ioc->hb_count;
2874 	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2875 }
2876 
2877 static void
2878 bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
2879 {
2880 	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2881 		return;
2882 }
2883 
2884 /*
2885  *  BFA IOC PF private functions
2886  */
2887 static void
2888 bfa_iocpf_timeout(void *ioc_arg)
2889 {
2890 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
2891 
2892 	bfa_trc(ioc, 0);
2893 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2894 }
2895 
2896 static void
2897 bfa_iocpf_sem_timeout(void *ioc_arg)
2898 {
2899 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
2900 
2901 	bfa_ioc_hw_sem_get(ioc);
2902 }
2903 
2904 static void
2905 bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
2906 {
2907 	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2908 
2909 	bfa_trc(ioc, fwstate);
2910 
2911 	if (fwstate == BFI_IOC_DISABLED) {
2912 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2913 		return;
2914 	}
2915 
2916 	if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
2917 		bfa_iocpf_timeout(ioc);
2918 	else {
2919 		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
2920 		bfa_iocpf_poll_timer_start(ioc);
2921 	}
2922 }
2923 
2924 static void
2925 bfa_iocpf_poll_timeout(void *ioc_arg)
2926 {
2927 	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2928 
2929 	bfa_ioc_poll_fwinit(ioc);
2930 }
2931 
2932 /*
 *  bfa timer tick; expected to be called every BFA_TIMER_FREQ msecs
2934  */
2935 void
2936 bfa_timer_beat(struct bfa_timer_mod_s *mod)
2937 {
2938 	struct list_head *qh = &mod->timer_q;
2939 	struct list_head *qe, *qe_next;
2940 	struct bfa_timer_s *elem;
2941 	struct list_head timedout_q;
2942 
2943 	INIT_LIST_HEAD(&timedout_q);
2944 
2945 	qe = bfa_q_next(qh);
2946 
2947 	while (qe != qh) {
2948 		qe_next = bfa_q_next(qe);
2949 
2950 		elem = (struct bfa_timer_s *) qe;
2951 		if (elem->timeout <= BFA_TIMER_FREQ) {
2952 			elem->timeout = 0;
2953 			list_del(&elem->qe);
2954 			list_add_tail(&elem->qe, &timedout_q);
2955 		} else {
2956 			elem->timeout -= BFA_TIMER_FREQ;
2957 		}
2958 
2959 		qe = qe_next;	/* go to next elem */
2960 	}
2961 
2962 	/*
2963 	 * Pop all the timeout entries
2964 	 */
2965 	while (!list_empty(&timedout_q)) {
2966 		bfa_q_deq(&timedout_q, &elem);
2967 		elem->timercb(elem->arg);
2968 	}
2969 }
2970 
2971 /*
2972  * Should be called with lock protection
2973  */
2974 void
2975 bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2976 		    void (*timercb) (void *), void *arg, unsigned int timeout)
2977 {
2978 
2979 	WARN_ON(timercb == NULL);
2980 	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
2981 
2982 	timer->timeout = timeout;
2983 	timer->timercb = timercb;
2984 	timer->arg = arg;
2985 
2986 	list_add_tail(&timer->qe, &mod->timer_q);
2987 }
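
/*
 * Usage sketch (illustrative; "my_timer", "my_cb" and "my_arg" are
 * hypothetical). Arm a one-shot 3000 msec timer; my_cb(my_arg) runs
 * from bfa_timer_beat() once the timeout decays to zero:
 *
 *	bfa_timer_begin(mod, &my_timer, my_cb, my_arg, 3000);
 */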
2988 
2989 /*
2990  * Should be called with lock protection
2991  */
2992 void
2993 bfa_timer_stop(struct bfa_timer_s *timer)
2994 {
2995 	WARN_ON(list_empty(&timer->qe));
2996 
2997 	list_del(&timer->qe);
2998 }
2999 
3000 /*
3001  *	ASIC block related
3002  */
3003 static void
3004 bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3005 {
3006 	struct bfa_ablk_cfg_inst_s *cfg_inst;
3007 	int i, j;
3008 	u16	be16;
3009 	u32	be32;
3010 
3011 	for (i = 0; i < BFA_ABLK_MAX; i++) {
3012 		cfg_inst = &cfg->inst[i];
3013 		for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3014 			be16 = cfg_inst->pf_cfg[j].pers;
3015 			cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3016 			be16 = cfg_inst->pf_cfg[j].num_qpairs;
3017 			cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3018 			be16 = cfg_inst->pf_cfg[j].num_vectors;
3019 			cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3020 			be32 = cfg_inst->pf_cfg[j].bw;
			cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
3022 		}
3023 	}
3024 }
3025 
3026 static void
3027 bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3028 {
3029 	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3030 	struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3031 	bfa_ablk_cbfn_t cbfn;
3032 
3033 	WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3034 	bfa_trc(ablk->ioc, msg->mh.msg_id);
3035 
3036 	switch (msg->mh.msg_id) {
3037 	case BFI_ABLK_I2H_QUERY:
3038 		if (rsp->status == BFA_STATUS_OK) {
3039 			memcpy(ablk->cfg, ablk->dma_addr.kva,
3040 				sizeof(struct bfa_ablk_cfg_s));
3041 			bfa_ablk_config_swap(ablk->cfg);
3042 			ablk->cfg = NULL;
3043 		}
3044 		break;
3045 
3046 	case BFI_ABLK_I2H_ADPT_CONFIG:
3047 	case BFI_ABLK_I2H_PORT_CONFIG:
3048 		/* update config port mode */
3049 		ablk->ioc->port_mode_cfg = rsp->port_mode;
		ablk->ioc->port_mode_cfg = rsp->port_mode;
		/* fall through -- the remaining handling is a no-op */

3052 	case BFI_ABLK_I2H_PF_UPDATE:
3053 	case BFI_ABLK_I2H_OPTROM_ENABLE:
3054 	case BFI_ABLK_I2H_OPTROM_DISABLE:
3055 		/* No-op */
3056 		break;
3057 
3058 	case BFI_ABLK_I2H_PF_CREATE:
3059 		*(ablk->pcifn) = rsp->pcifn;
3060 		ablk->pcifn = NULL;
3061 		break;
3062 
3063 	default:
3064 		WARN_ON(1);
3065 	}
3066 
3067 	ablk->busy = BFA_FALSE;
3068 	if (ablk->cbfn) {
3069 		cbfn = ablk->cbfn;
3070 		ablk->cbfn = NULL;
3071 		cbfn(ablk->cbarg, rsp->status);
3072 	}
3073 }
3074 
3075 static void
3076 bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3077 {
3078 	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3079 
3080 	bfa_trc(ablk->ioc, event);
3081 
3082 	switch (event) {
3083 	case BFA_IOC_E_ENABLED:
3084 		WARN_ON(ablk->busy != BFA_FALSE);
3085 		break;
3086 
3087 	case BFA_IOC_E_DISABLED:
3088 	case BFA_IOC_E_FAILED:
3089 		/* Fail any pending requests */
3090 		ablk->pcifn = NULL;
3091 		if (ablk->busy) {
3092 			if (ablk->cbfn)
3093 				ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3094 			ablk->cbfn = NULL;
3095 			ablk->busy = BFA_FALSE;
3096 		}
3097 		break;
3098 
3099 	default:
3100 		WARN_ON(1);
3101 		break;
3102 	}
3103 }
3104 
3105 u32
3106 bfa_ablk_meminfo(void)
3107 {
3108 	return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3109 }
3110 
3111 void
3112 bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3113 {
3114 	ablk->dma_addr.kva = dma_kva;
3115 	ablk->dma_addr.pa  = dma_pa;
3116 }
3117 
3118 void
3119 bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3120 {
3121 	ablk->ioc = ioc;
3122 
3123 	bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3124 	bfa_q_qe_init(&ablk->ioc_notify);
3125 	bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3126 	list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3127 }
3128 
3129 bfa_status_t
3130 bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3131 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3132 {
3133 	struct bfi_ablk_h2i_query_s *m;
3134 
3135 	WARN_ON(!ablk_cfg);
3136 
3137 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3138 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3139 		return BFA_STATUS_IOC_FAILURE;
3140 	}
3141 
3142 	if (ablk->busy) {
3143 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3144 		return  BFA_STATUS_DEVBUSY;
3145 	}
3146 
3147 	ablk->cfg = ablk_cfg;
3148 	ablk->cbfn  = cbfn;
3149 	ablk->cbarg = cbarg;
3150 	ablk->busy  = BFA_TRUE;
3151 
3152 	m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3153 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3154 		    bfa_ioc_portid(ablk->ioc));
3155 	bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3156 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3157 
3158 	return BFA_STATUS_OK;
3159 }
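
/*
 * Usage sketch (illustrative; "my_cfg" and "my_done" are hypothetical).
 * The query completes asynchronously in bfa_ablk_isr(), which byte-swaps
 * the DMA'd configuration and then invokes the callback:
 *
 *	struct bfa_ablk_cfg_s my_cfg;
 *
 *	if (bfa_ablk_query(ablk, &my_cfg, my_done, NULL) != BFA_STATUS_OK)
 *		handle_busy_or_ioc_failure();
 */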
3160 
3161 bfa_status_t
3162 bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3163 		u8 port, enum bfi_pcifn_class personality, int bw,
3164 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3165 {
3166 	struct bfi_ablk_h2i_pf_req_s *m;
3167 
3168 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3169 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3170 		return BFA_STATUS_IOC_FAILURE;
3171 	}
3172 
3173 	if (ablk->busy) {
3174 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3175 		return  BFA_STATUS_DEVBUSY;
3176 	}
3177 
3178 	ablk->pcifn = pcifn;
3179 	ablk->cbfn = cbfn;
3180 	ablk->cbarg = cbarg;
3181 	ablk->busy  = BFA_TRUE;
3182 
3183 	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3184 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3185 		    bfa_ioc_portid(ablk->ioc));
3186 	m->pers = cpu_to_be16((u16)personality);
3187 	m->bw = cpu_to_be32(bw);
3188 	m->port = port;
3189 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3190 
3191 	return BFA_STATUS_OK;
3192 }
3193 
3194 bfa_status_t
3195 bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3196 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3197 {
3198 	struct bfi_ablk_h2i_pf_req_s *m;
3199 
3200 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3201 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3202 		return BFA_STATUS_IOC_FAILURE;
3203 	}
3204 
3205 	if (ablk->busy) {
3206 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3207 		return  BFA_STATUS_DEVBUSY;
3208 	}
3209 
3210 	ablk->cbfn  = cbfn;
3211 	ablk->cbarg = cbarg;
3212 	ablk->busy  = BFA_TRUE;
3213 
3214 	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3215 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3216 		    bfa_ioc_portid(ablk->ioc));
3217 	m->pcifn = (u8)pcifn;
3218 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3219 
3220 	return BFA_STATUS_OK;
3221 }
3222 
3223 bfa_status_t
3224 bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3225 		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3226 {
3227 	struct bfi_ablk_h2i_cfg_req_s *m;
3228 
3229 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3230 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3231 		return BFA_STATUS_IOC_FAILURE;
3232 	}
3233 
3234 	if (ablk->busy) {
3235 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3236 		return  BFA_STATUS_DEVBUSY;
3237 	}
3238 
3239 	ablk->cbfn  = cbfn;
3240 	ablk->cbarg = cbarg;
3241 	ablk->busy  = BFA_TRUE;
3242 
3243 	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3244 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3245 		    bfa_ioc_portid(ablk->ioc));
3246 	m->mode = (u8)mode;
3247 	m->max_pf = (u8)max_pf;
3248 	m->max_vf = (u8)max_vf;
3249 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3250 
3251 	return BFA_STATUS_OK;
3252 }
3253 
3254 bfa_status_t
3255 bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3256 		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3257 {
3258 	struct bfi_ablk_h2i_cfg_req_s *m;
3259 
3260 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3261 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3262 		return BFA_STATUS_IOC_FAILURE;
3263 	}
3264 
3265 	if (ablk->busy) {
3266 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3267 		return  BFA_STATUS_DEVBUSY;
3268 	}
3269 
3270 	ablk->cbfn  = cbfn;
3271 	ablk->cbarg = cbarg;
3272 	ablk->busy  = BFA_TRUE;
3273 
3274 	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3275 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3276 		bfa_ioc_portid(ablk->ioc));
3277 	m->port = (u8)port;
3278 	m->mode = (u8)mode;
3279 	m->max_pf = (u8)max_pf;
3280 	m->max_vf = (u8)max_vf;
3281 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3282 
3283 	return BFA_STATUS_OK;
3284 }
3285 
3286 bfa_status_t
3287 bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
3288 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3289 {
3290 	struct bfi_ablk_h2i_pf_req_s *m;
3291 
3292 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3293 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3294 		return BFA_STATUS_IOC_FAILURE;
3295 	}
3296 
3297 	if (ablk->busy) {
3298 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3299 		return  BFA_STATUS_DEVBUSY;
3300 	}
3301 
3302 	ablk->cbfn  = cbfn;
3303 	ablk->cbarg = cbarg;
3304 	ablk->busy  = BFA_TRUE;
3305 
3306 	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3307 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3308 		bfa_ioc_portid(ablk->ioc));
3309 	m->pcifn = (u8)pcifn;
3310 	m->bw = cpu_to_be32(bw);
3311 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3312 
3313 	return BFA_STATUS_OK;
3314 }
3315 
3316 bfa_status_t
3317 bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3318 {
3319 	struct bfi_ablk_h2i_optrom_s *m;
3320 
3321 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3322 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3323 		return BFA_STATUS_IOC_FAILURE;
3324 	}
3325 
3326 	if (ablk->busy) {
3327 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3328 		return  BFA_STATUS_DEVBUSY;
3329 	}
3330 
3331 	ablk->cbfn  = cbfn;
3332 	ablk->cbarg = cbarg;
3333 	ablk->busy  = BFA_TRUE;
3334 
3335 	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3336 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3337 		bfa_ioc_portid(ablk->ioc));
3338 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3339 
3340 	return BFA_STATUS_OK;
3341 }
3342 
3343 bfa_status_t
3344 bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3345 {
3346 	struct bfi_ablk_h2i_optrom_s *m;
3347 
3348 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3349 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3350 		return BFA_STATUS_IOC_FAILURE;
3351 	}
3352 
3353 	if (ablk->busy) {
3354 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3355 		return  BFA_STATUS_DEVBUSY;
3356 	}
3357 
3358 	ablk->cbfn  = cbfn;
3359 	ablk->cbarg = cbarg;
3360 	ablk->busy  = BFA_TRUE;
3361 
3362 	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3363 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3364 		bfa_ioc_portid(ablk->ioc));
3365 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3366 
3367 	return BFA_STATUS_OK;
3368 }
3369 
3370 /*
3371  *	SFP module specific
3372  */
3373 
3374 /* forward declarations */
3375 static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3376 static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3377 static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3378 				enum bfa_port_speed portspeed);
3379 
3380 static void
3381 bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3382 {
3383 	bfa_trc(sfp, sfp->lock);
3384 	if (sfp->cbfn)
3385 		sfp->cbfn(sfp->cbarg, sfp->status);
3386 	sfp->lock = 0;
3387 	sfp->cbfn = NULL;
3388 }
3389 
3390 static void
3391 bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3392 {
	bfa_trc(sfp, sfp->portspeed);

	if (sfp->media) {
		bfa_sfp_media_get(sfp);
		if (sfp->state_query_cbfn)
			sfp->state_query_cbfn(sfp->state_query_cbarg,
					sfp->status);
		sfp->media = NULL;
	}

	if (sfp->portspeed) {
		sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
		if (sfp->state_query_cbfn)
			sfp->state_query_cbfn(sfp->state_query_cbarg,
					sfp->status);
		sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
	}

	sfp->state_query_lock = 0;
	sfp->state_query_cbfn = NULL;
}
3413 
3414 /*
3415  *	IOC event handler.
3416  */
3417 static void
3418 bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3419 {
3420 	struct bfa_sfp_s *sfp = sfp_arg;
3421 
3422 	bfa_trc(sfp, event);
3423 	bfa_trc(sfp, sfp->lock);
3424 	bfa_trc(sfp, sfp->state_query_lock);
3425 
3426 	switch (event) {
3427 	case BFA_IOC_E_DISABLED:
3428 	case BFA_IOC_E_FAILED:
3429 		if (sfp->lock) {
3430 			sfp->status = BFA_STATUS_IOC_FAILURE;
3431 			bfa_cb_sfp_show(sfp);
3432 		}
3433 
3434 		if (sfp->state_query_lock) {
3435 			sfp->status = BFA_STATUS_IOC_FAILURE;
3436 			bfa_cb_sfp_state_query(sfp);
3437 		}
3438 		break;
3439 
3440 	default:
3441 		break;
3442 	}
3443 }
3444 
3445 /*
3446  *	SFP get data send
3447  */
3448 static void
3449 bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3450 {
3451 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3452 
3453 	bfa_trc(sfp, req->memtype);
3454 
3455 	/* build host command */
3456 	bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3457 			bfa_ioc_portid(sfp->ioc));
3458 
3459 	/* send mbox cmd */
3460 	bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3461 }
3462 
3463 /*
3464  *	SFP is valid, read sfp data
3465  */
3466 static void
3467 bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3468 {
3469 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3470 
3471 	WARN_ON(sfp->lock != 0);
3472 	bfa_trc(sfp, sfp->state);
3473 
3474 	sfp->lock = 1;
3475 	sfp->memtype = memtype;
3476 	req->memtype = memtype;
3477 
3478 	/* Setup SG list */
3479 	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3480 
3481 	bfa_sfp_getdata_send(sfp);
3482 }
3483 
3484 /*
3485  * SFP show complete
3486  */
3487 static void
3488 bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3489 {
3490 	struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3491 
3492 	if (!sfp->lock) {
3493 		/*
3494 		 * receiving response after ioc failure
3495 		 */
3496 		bfa_trc(sfp, sfp->lock);
3497 		return;
3498 	}
3499 
3500 	bfa_trc(sfp, rsp->status);
3501 	if (rsp->status == BFA_STATUS_OK) {
3502 		sfp->data_valid = 1;
3503 		if (sfp->state == BFA_SFP_STATE_VALID)
3504 			sfp->status = BFA_STATUS_OK;
3505 		else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3506 			sfp->status = BFA_STATUS_SFP_UNSUPP;
3507 		else
3508 			bfa_trc(sfp, sfp->state);
3509 	} else {
3510 		sfp->data_valid = 0;
3511 		sfp->status = rsp->status;
3512 		/* sfpshow shouldn't change sfp state */
3513 	}
3514 
3515 	bfa_trc(sfp, sfp->memtype);
3516 	if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3517 		bfa_trc(sfp, sfp->data_valid);
3518 		if (sfp->data_valid) {
3519 			u32	size = sizeof(struct sfp_mem_s);
3520 			u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
3521 			memcpy(des, sfp->dbuf_kva, size);
3522 		}
3523 		/*
3524 		 * Queue completion callback.
3525 		 */
3526 		bfa_cb_sfp_show(sfp);
3527 	} else
3528 		sfp->lock = 0;
3529 
3530 	bfa_trc(sfp, sfp->state_query_lock);
3531 	if (sfp->state_query_lock) {
3532 		sfp->state = rsp->state;
3533 		/* Complete callback */
3534 		bfa_cb_sfp_state_query(sfp);
3535 	}
3536 }
3537 
3538 /*
3539  *	SFP query fw sfp state
3540  */
3541 static void
3542 bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3543 {
3544 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3545 
3546 	/* Should not be doing query if not in _INIT state */
3547 	WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3548 	WARN_ON(sfp->state_query_lock != 0);
3549 	bfa_trc(sfp, sfp->state);
3550 
3551 	sfp->state_query_lock = 1;
3552 	req->memtype = 0;
3553 
3554 	if (!sfp->lock)
3555 		bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3556 }
3557 
3558 static void
3559 bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3560 {
3561 	enum bfa_defs_sfp_media_e *media = sfp->media;
3562 
3563 	*media = BFA_SFP_MEDIA_UNKNOWN;
3564 
3565 	if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3566 		*media = BFA_SFP_MEDIA_UNSUPPORT;
3567 	else if (sfp->state == BFA_SFP_STATE_VALID) {
3568 		union sfp_xcvr_e10g_code_u e10g;
3569 		struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3570 		u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3571 				(sfpmem->srlid_base.xcvr[5] >> 1);
3572 
3573 		e10g.b = sfpmem->srlid_base.xcvr[0];
3574 		bfa_trc(sfp, e10g.b);
3575 		bfa_trc(sfp, xmtr_tech);
3576 		/* check fc transmitter tech */
3577 		if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3578 		    (xmtr_tech & SFP_XMTR_TECH_CP) ||
3579 		    (xmtr_tech & SFP_XMTR_TECH_CA))
3580 			*media = BFA_SFP_MEDIA_CU;
3581 		else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3582 			 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3583 			*media = BFA_SFP_MEDIA_EL;
3584 		else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3585 			 (xmtr_tech & SFP_XMTR_TECH_LC))
3586 			*media = BFA_SFP_MEDIA_LW;
3587 		else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3588 			 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3589 			 (xmtr_tech & SFP_XMTR_TECH_SA))
3590 			*media = BFA_SFP_MEDIA_SW;
		/* Check 10G Ethernet Compliance code */
3592 		else if (e10g.b & 0x10)
3593 			*media = BFA_SFP_MEDIA_SW;
3594 		else if (e10g.b & 0x60)
3595 			*media = BFA_SFP_MEDIA_LW;
3596 		else if (e10g.r.e10g_unall & 0x80)
3597 			*media = BFA_SFP_MEDIA_UNKNOWN;
3598 		else
3599 			bfa_trc(sfp, 0);
3600 	} else
3601 		bfa_trc(sfp, sfp->state);
3602 }
3603 
3604 static bfa_status_t
3605 bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3606 {
3607 	struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3608 	struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3609 	union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3610 	union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3611 
3612 	if (portspeed == BFA_PORT_SPEED_10GBPS) {
3613 		if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3614 			return BFA_STATUS_OK;
3615 		else {
3616 			bfa_trc(sfp, e10g.b);
3617 			return BFA_STATUS_UNSUPP_SPEED;
3618 		}
3619 	}
3620 	if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3621 	    ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3622 	    ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3623 	    ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3624 	    ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3625 		return BFA_STATUS_OK;
3626 	else {
3627 		bfa_trc(sfp, portspeed);
3628 		bfa_trc(sfp, fc3.b);
3629 		bfa_trc(sfp, e10g.b);
3630 		return BFA_STATUS_UNSUPP_SPEED;
3631 	}
3632 }
3633 
3634 /*
 *	SFP mbox handler
3636  */
3637 void
3638 bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3639 {
3640 	struct bfa_sfp_s *sfp = sfparg;
3641 
3642 	switch (msg->mh.msg_id) {
3643 	case BFI_SFP_I2H_SHOW:
3644 		bfa_sfp_show_comp(sfp, msg);
3645 		break;
3646 
3647 	case BFI_SFP_I2H_SCN:
3648 		bfa_trc(sfp, msg->mh.msg_id);
3649 		break;
3650 
3651 	default:
3652 		bfa_trc(sfp, msg->mh.msg_id);
3653 		WARN_ON(1);
3654 	}
3655 }
3656 
3657 /*
3658  *	Return DMA memory needed by sfp module.
3659  */
3660 u32
3661 bfa_sfp_meminfo(void)
3662 {
3663 	return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3664 }
3665 
3666 /*
3667  *	Attach virtual and physical memory for SFP.
3668  */
3669 void
3670 bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
3671 		struct bfa_trc_mod_s *trcmod)
3672 {
3673 	sfp->dev = dev;
3674 	sfp->ioc = ioc;
3675 	sfp->trcmod = trcmod;
3676 
3677 	sfp->cbfn = NULL;
3678 	sfp->cbarg = NULL;
3679 	sfp->sfpmem = NULL;
3680 	sfp->lock = 0;
3681 	sfp->data_valid = 0;
3682 	sfp->state = BFA_SFP_STATE_INIT;
3683 	sfp->state_query_lock = 0;
3684 	sfp->state_query_cbfn = NULL;
3685 	sfp->state_query_cbarg = NULL;
3686 	sfp->media = NULL;
3687 	sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3688 	sfp->is_elb = BFA_FALSE;
3689 
3690 	bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
3691 	bfa_q_qe_init(&sfp->ioc_notify);
3692 	bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
3693 	list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
3694 }
3695 
3696 /*
3697  *	Claim Memory for SFP
3698  */
3699 void
3700 bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
3701 {
3702 	sfp->dbuf_kva   = dm_kva;
3703 	sfp->dbuf_pa    = dm_pa;
3704 	memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
3705 
3706 	dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3707 	dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3708 }
3709 
3710 /*
3711  * Show SFP eeprom content
3712  *
3713  * @param[in] sfp   - bfa sfp module
3714  *
3715  * @param[out] sfpmem - sfp eeprom data
3716  *
3717  */
3718 bfa_status_t
3719 bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
3720 		bfa_cb_sfp_t cbfn, void *cbarg)
3721 {
3722 
3723 	if (!bfa_ioc_is_operational(sfp->ioc)) {
3724 		bfa_trc(sfp, 0);
3725 		return BFA_STATUS_IOC_NON_OP;
3726 	}
3727 
3728 	if (sfp->lock) {
3729 		bfa_trc(sfp, 0);
3730 		return BFA_STATUS_DEVBUSY;
3731 	}
3732 
3733 	sfp->cbfn = cbfn;
3734 	sfp->cbarg = cbarg;
3735 	sfp->sfpmem = sfpmem;
3736 
3737 	bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
3738 	return BFA_STATUS_OK;
3739 }
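
/*
 * Usage sketch (illustrative; "my_sfpmem", "my_cb" and "my_arg" are
 * hypothetical). The EEPROM contents land in my_sfpmem once my_cb
 * reports BFA_STATUS_OK:
 *
 *	struct sfp_mem_s my_sfpmem;
 *
 *	if (bfa_sfp_show(sfp, &my_sfpmem, my_cb, my_arg) != BFA_STATUS_OK)
 *		handle_busy_or_non_op();
 */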
3740 
3741 /*
3742  * Return SFP Media type
3743  *
3744  * @param[in] sfp   - bfa sfp module
3745  *
 * @param[out] media - SFP media type detected on the port
3747  *
3748  */
3749 bfa_status_t
3750 bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
3751 		bfa_cb_sfp_t cbfn, void *cbarg)
3752 {
3753 	if (!bfa_ioc_is_operational(sfp->ioc)) {
3754 		bfa_trc(sfp, 0);
3755 		return BFA_STATUS_IOC_NON_OP;
3756 	}
3757 
3758 	sfp->media = media;
3759 	if (sfp->state == BFA_SFP_STATE_INIT) {
3760 		if (sfp->state_query_lock) {
3761 			bfa_trc(sfp, 0);
3762 			return BFA_STATUS_DEVBUSY;
3763 		} else {
3764 			sfp->state_query_cbfn = cbfn;
3765 			sfp->state_query_cbarg = cbarg;
3766 			bfa_sfp_state_query(sfp);
3767 			return BFA_STATUS_SFP_NOT_READY;
3768 		}
3769 	}
3770 
3771 	bfa_sfp_media_get(sfp);
3772 	return BFA_STATUS_OK;
3773 }
3774 
3775 /*
 * Check if the user-set port speed is allowed by the SFP
3777  *
3778  * @param[in] sfp   - bfa sfp module
3779  * @param[in] portspeed - port speed from user
3780  *
3781  */
3782 bfa_status_t
3783 bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3784 		bfa_cb_sfp_t cbfn, void *cbarg)
3785 {
3786 	WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
3787 
3788 	if (!bfa_ioc_is_operational(sfp->ioc))
3789 		return BFA_STATUS_IOC_NON_OP;
3790 
	/* For Mezz cards, all speeds are allowed */
3792 	if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
3793 		return BFA_STATUS_OK;
3794 
3795 	/* Check SFP state */
3796 	sfp->portspeed = portspeed;
3797 	if (sfp->state == BFA_SFP_STATE_INIT) {
3798 		if (sfp->state_query_lock) {
3799 			bfa_trc(sfp, 0);
3800 			return BFA_STATUS_DEVBUSY;
3801 		} else {
3802 			sfp->state_query_cbfn = cbfn;
3803 			sfp->state_query_cbarg = cbarg;
3804 			bfa_sfp_state_query(sfp);
3805 			return BFA_STATUS_SFP_NOT_READY;
3806 		}
3807 	}
3808 
3809 	if (sfp->state == BFA_SFP_STATE_REMOVED ||
3810 	    sfp->state == BFA_SFP_STATE_FAILED) {
3811 		bfa_trc(sfp, sfp->state);
3812 		return BFA_STATUS_NO_SFP_DEV;
3813 	}
3814 
3815 	if (sfp->state == BFA_SFP_STATE_INSERTED) {
3816 		bfa_trc(sfp, sfp->state);
3817 		return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
3818 	}
3819 
	/* For eloopback, all speeds are allowed */
3821 	if (sfp->is_elb)
3822 		return BFA_STATUS_OK;
3823 
3824 	return bfa_sfp_speed_valid(sfp, portspeed);
3825 }
3826 
3827 /*
3828  *	Flash module specific
3829  */
3830 
3831 /*
 * The FLASH DMA buffer should be big enough to hold both the MFG block
 * and the ASIC block (64k) at the same time, and should also be 2k
 * aligned so that a write segment cannot cross a sector boundary.
3835  */
3836 #define BFA_FLASH_SEG_SZ	2048
3837 #define BFA_FLASH_DMA_BUF_SZ	\
3838 	BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
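
/*
 * Worked example (illustrative): with BFA_FLASH_SEG_SZ = 2048, the
 * buffer holds the 64k (0x010000) ASIC block plus
 * sizeof(struct bfa_mfg_block_s), rounded up by BFA_ROUNDUP() to the
 * next 2k multiple so no write segment straddles a sector boundary.
 */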
3839 
3840 static void
3841 bfa_flash_cb(struct bfa_flash_s *flash)
3842 {
3843 	flash->op_busy = 0;
3844 	if (flash->cbfn)
3845 		flash->cbfn(flash->cbarg, flash->status);
3846 }
3847 
3848 static void
3849 bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
3850 {
3851 	struct bfa_flash_s	*flash = cbarg;
3852 
3853 	bfa_trc(flash, event);
3854 	switch (event) {
3855 	case BFA_IOC_E_DISABLED:
3856 	case BFA_IOC_E_FAILED:
3857 		if (flash->op_busy) {
3858 			flash->status = BFA_STATUS_IOC_FAILURE;
3859 			flash->cbfn(flash->cbarg, flash->status);
3860 			flash->op_busy = 0;
3861 		}
3862 		break;
3863 
3864 	default:
3865 		break;
3866 	}
3867 }
3868 
3869 /*
3870  * Send flash attribute query request.
3871  *
3872  * @param[in] cbarg - callback argument
3873  */
3874 static void
3875 bfa_flash_query_send(void *cbarg)
3876 {
3877 	struct bfa_flash_s *flash = cbarg;
3878 	struct bfi_flash_query_req_s *msg =
3879 			(struct bfi_flash_query_req_s *) flash->mb.msg;
3880 
3881 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
3882 		bfa_ioc_portid(flash->ioc));
3883 	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
3884 		flash->dbuf_pa);
3885 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3886 }
3887 
3888 /*
3889  * Send flash write request.
3890  *
3891  * @param[in] cbarg - callback argument
3892  */
3893 static void
3894 bfa_flash_write_send(struct bfa_flash_s *flash)
3895 {
3896 	struct bfi_flash_write_req_s *msg =
3897 			(struct bfi_flash_write_req_s *) flash->mb.msg;
3898 	u32	len;
3899 
	msg->type = cpu_to_be32(flash->type);
	msg->instance = flash->instance;
	msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
		flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = cpu_to_be32(len);
3906 
3907 	/* indicate if it's the last msg of the whole write operation */
3908 	msg->last = (len == flash->residue) ? 1 : 0;
3909 
3910 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
3911 			bfa_ioc_portid(flash->ioc));
3912 	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
3913 	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
3914 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3915 
3916 	flash->residue -= len;
3917 	flash->offset += len;
3918 }
3919 
3920 /*
3921  * Send flash read request.
3922  *
3923  * @param[in] cbarg - callback argument
3924  */
3925 static void
3926 bfa_flash_read_send(void *cbarg)
3927 {
3928 	struct bfa_flash_s *flash = cbarg;
3929 	struct bfi_flash_read_req_s *msg =
3930 			(struct bfi_flash_read_req_s *) flash->mb.msg;
3931 	u32	len;
3932 
	msg->type = cpu_to_be32(flash->type);
	msg->instance = flash->instance;
	msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
			flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = cpu_to_be32(len);
3939 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
3940 		bfa_ioc_portid(flash->ioc));
3941 	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
3942 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3943 }
3944 
3945 /*
3946  * Send flash erase request.
3947  *
3948  * @param[in] cbarg - callback argument
3949  */
3950 static void
3951 bfa_flash_erase_send(void *cbarg)
3952 {
3953 	struct bfa_flash_s *flash = cbarg;
3954 	struct bfi_flash_erase_req_s *msg =
3955 			(struct bfi_flash_erase_req_s *) flash->mb.msg;
3956 
	msg->type = cpu_to_be32(flash->type);
3958 	msg->instance = flash->instance;
3959 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
3960 			bfa_ioc_portid(flash->ioc));
3961 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3962 }
3963 
3964 /*
3965  * Process flash response messages upon receiving interrupts.
3966  *
3967  * @param[in] flasharg - flash structure
3968  * @param[in] msg - message structure
3969  */
3970 static void
3971 bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
3972 {
3973 	struct bfa_flash_s *flash = flasharg;
3974 	u32	status;
3975 
3976 	union {
3977 		struct bfi_flash_query_rsp_s *query;
3978 		struct bfi_flash_erase_rsp_s *erase;
3979 		struct bfi_flash_write_rsp_s *write;
3980 		struct bfi_flash_read_rsp_s *read;
3981 		struct bfi_mbmsg_s   *msg;
3982 	} m;
3983 
3984 	m.msg = msg;
3985 	bfa_trc(flash, msg->mh.msg_id);
3986 
3987 	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
3988 		/* receiving response after ioc failure */
3989 		bfa_trc(flash, 0x9999);
3990 		return;
3991 	}
3992 
3993 	switch (msg->mh.msg_id) {
3994 	case BFI_FLASH_I2H_QUERY_RSP:
3995 		status = be32_to_cpu(m.query->status);
3996 		bfa_trc(flash, status);
3997 		if (status == BFA_STATUS_OK) {
3998 			u32	i;
3999 			struct bfa_flash_attr_s *attr, *f;
4000 
4001 			attr = (struct bfa_flash_attr_s *) flash->ubuf;
4002 			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4003 			attr->status = be32_to_cpu(f->status);
4004 			attr->npart = be32_to_cpu(f->npart);
4005 			bfa_trc(flash, attr->status);
4006 			bfa_trc(flash, attr->npart);
4007 			for (i = 0; i < attr->npart; i++) {
4008 				attr->part[i].part_type =
4009 					be32_to_cpu(f->part[i].part_type);
4010 				attr->part[i].part_instance =
4011 					be32_to_cpu(f->part[i].part_instance);
4012 				attr->part[i].part_off =
4013 					be32_to_cpu(f->part[i].part_off);
4014 				attr->part[i].part_size =
4015 					be32_to_cpu(f->part[i].part_size);
4016 				attr->part[i].part_len =
4017 					be32_to_cpu(f->part[i].part_len);
4018 				attr->part[i].part_status =
4019 					be32_to_cpu(f->part[i].part_status);
4020 			}
4021 		}
4022 		flash->status = status;
4023 		bfa_flash_cb(flash);
4024 		break;
4025 	case BFI_FLASH_I2H_ERASE_RSP:
4026 		status = be32_to_cpu(m.erase->status);
4027 		bfa_trc(flash, status);
4028 		flash->status = status;
4029 		bfa_flash_cb(flash);
4030 		break;
4031 	case BFI_FLASH_I2H_WRITE_RSP:
4032 		status = be32_to_cpu(m.write->status);
4033 		bfa_trc(flash, status);
4034 		if (status != BFA_STATUS_OK || flash->residue == 0) {
4035 			flash->status = status;
4036 			bfa_flash_cb(flash);
4037 		} else {
4038 			bfa_trc(flash, flash->offset);
4039 			bfa_flash_write_send(flash);
4040 		}
4041 		break;
4042 	case BFI_FLASH_I2H_READ_RSP:
4043 		status = be32_to_cpu(m.read->status);
4044 		bfa_trc(flash, status);
4045 		if (status != BFA_STATUS_OK) {
4046 			flash->status = status;
4047 			bfa_flash_cb(flash);
4048 		} else {
4049 			u32 len = be32_to_cpu(m.read->length);
4050 			bfa_trc(flash, flash->offset);
4051 			bfa_trc(flash, len);
4052 			memcpy(flash->ubuf + flash->offset,
4053 				flash->dbuf_kva, len);
4054 			flash->residue -= len;
4055 			flash->offset += len;
4056 			if (flash->residue == 0) {
4057 				flash->status = status;
4058 				bfa_flash_cb(flash);
4059 			} else
4060 				bfa_flash_read_send(flash);
4061 		}
4062 		break;
4063 	case BFI_FLASH_I2H_BOOT_VER_RSP:
4064 	case BFI_FLASH_I2H_EVENT:
4065 		bfa_trc(flash, msg->mh.msg_id);
4066 		break;
4067 
4068 	default:
4069 		WARN_ON(1);
4070 	}
4071 }
4072 
4073 /*
4074  * Flash memory info API.
4075  *
4076  * @param[in] mincfg - minimal cfg variable
4077  */
4078 u32
4079 bfa_flash_meminfo(bfa_boolean_t mincfg)
4080 {
4081 	/* min driver doesn't need flash */
4082 	if (mincfg)
4083 		return 0;
4084 	return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4085 }
4086 
4087 /*
4088  * Flash attach API.
4089  *
4090  * @param[in] flash - flash structure
4091  * @param[in] ioc  - ioc structure
4092  * @param[in] dev  - device structure
4093  * @param[in] trcmod - trace module
4094  * @param[in] logmod - log module
4095  */
4096 void
4097 bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4098 		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4099 {
4100 	flash->ioc = ioc;
4101 	flash->trcmod = trcmod;
4102 	flash->cbfn = NULL;
4103 	flash->cbarg = NULL;
4104 	flash->op_busy = 0;
4105 
4106 	bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4107 	bfa_q_qe_init(&flash->ioc_notify);
4108 	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4109 	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4110 
4111 	/* min driver doesn't need flash */
4112 	if (mincfg) {
4113 		flash->dbuf_kva = NULL;
4114 		flash->dbuf_pa = 0;
4115 	}
4116 }
4117 
4118 /*
4119  * Claim memory for flash
4120  *
4121  * @param[in] flash - flash structure
4122  * @param[in] dm_kva - pointer to virtual memory address
4123  * @param[in] dm_pa - physical memory address
4124  * @param[in] mincfg - minimal cfg variable
4125  */
4126 void
4127 bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4128 		bfa_boolean_t mincfg)
4129 {
4130 	if (mincfg)
4131 		return;
4132 
4133 	flash->dbuf_kva = dm_kva;
4134 	flash->dbuf_pa = dm_pa;
4135 	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4136 	dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4137 	dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4138 }
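
/*
 * Minimal attach-time sketch (hypothetical caller; the real driver
 * sizes and hands out this memory through its DMA memory manager).
 * bfa_flash_meminfo() returns 0 when mincfg is set, so no claim is
 * needed in that case:
 *
 *	u32 sz = bfa_flash_meminfo(mincfg);
 *	bfa_flash_attach(flash, ioc, dev, trcmod, mincfg);
 *	if (sz)
 *		bfa_flash_memclaim(flash, dm_kva, dm_pa, mincfg);
 *
 * dm_kva/dm_pa must point at 'sz' bytes of DMA-able memory.
 */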
4139 
4140 /*
4141  * Get flash attribute.
4142  *
4143  * @param[in] flash - flash structure
4144  * @param[in] attr - flash attribute structure
4145  * @param[in] cbfn - callback function
4146  * @param[in] cbarg - callback argument
4147  *
4148  * Return status.
4149  */
4150 bfa_status_t
4151 bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4152 		bfa_cb_flash_t cbfn, void *cbarg)
4153 {
4154 	bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4155 
4156 	if (!bfa_ioc_is_operational(flash->ioc))
4157 		return BFA_STATUS_IOC_NON_OP;
4158 
4159 	if (flash->op_busy) {
4160 		bfa_trc(flash, flash->op_busy);
4161 		return BFA_STATUS_DEVBUSY;
4162 	}
4163 
4164 	flash->op_busy = 1;
4165 	flash->cbfn = cbfn;
4166 	flash->cbarg = cbarg;
4167 	flash->ubuf = (u8 *) attr;
4168 	bfa_flash_query_send(flash);
4169 
4170 	return BFA_STATUS_OK;
4171 }
4172 
4173 /*
4174  * Erase flash partition.
4175  *
4176  * @param[in] flash - flash structure
4177  * @param[in] type - flash partition type
4178  * @param[in] instance - flash partition instance
4179  * @param[in] cbfn - callback function
4180  * @param[in] cbarg - callback argument
4181  *
4182  * Return status.
4183  */
4184 bfa_status_t
4185 bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4186 		u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4187 {
4188 	bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4189 	bfa_trc(flash, type);
4190 	bfa_trc(flash, instance);
4191 
4192 	if (!bfa_ioc_is_operational(flash->ioc))
4193 		return BFA_STATUS_IOC_NON_OP;
4194 
4195 	if (flash->op_busy) {
4196 		bfa_trc(flash, flash->op_busy);
4197 		return BFA_STATUS_DEVBUSY;
4198 	}
4199 
4200 	flash->op_busy = 1;
4201 	flash->cbfn = cbfn;
4202 	flash->cbarg = cbarg;
4203 	flash->type = type;
4204 	flash->instance = instance;
4205 
4206 	bfa_flash_erase_send(flash);
4207 	return BFA_STATUS_OK;
4208 }
4209 
4210 /*
4211  * Update flash partition.
4212  *
4213  * @param[in] flash - flash structure
4214  * @param[in] type - flash partition type
4215  * @param[in] instance - flash partition instance
4216  * @param[in] buf - update data buffer
4217  * @param[in] len - data buffer length
4218  * @param[in] offset - offset relative to the partition starting address
4219  * @param[in] cbfn - callback function
4220  * @param[in] cbarg - callback argument
4221  *
4222  * Return status.
4223  */
4224 bfa_status_t
4225 bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4226 		u8 instance, void *buf, u32 len, u32 offset,
4227 		bfa_cb_flash_t cbfn, void *cbarg)
4228 {
4229 	bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4230 	bfa_trc(flash, type);
4231 	bfa_trc(flash, instance);
4232 	bfa_trc(flash, len);
4233 	bfa_trc(flash, offset);
4234 
4235 	if (!bfa_ioc_is_operational(flash->ioc))
4236 		return BFA_STATUS_IOC_NON_OP;
4237 
4238 	/*
4239 	 * 'len' must be on a word (4-byte) boundary
4240 	 * 'offset' must be on a sector (16KB) boundary
4241 	 */
4242 	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4243 		return BFA_STATUS_FLASH_BAD_LEN;
4244 
4245 	if (type == BFA_FLASH_PART_MFG)
4246 		return BFA_STATUS_EINVAL;
4247 
4248 	if (flash->op_busy) {
4249 		bfa_trc(flash, flash->op_busy);
4250 		return BFA_STATUS_DEVBUSY;
4251 	}
4252 
4253 	flash->op_busy = 1;
4254 	flash->cbfn = cbfn;
4255 	flash->cbarg = cbarg;
4256 	flash->type = type;
4257 	flash->instance = instance;
4258 	flash->residue = len;
4259 	flash->offset = 0;
4260 	flash->addr_off = offset;
4261 	flash->ubuf = buf;
4262 
4263 	bfa_flash_write_send(flash);
4264 	return BFA_STATUS_OK;
4265 }
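
/*
 * Usage sketch (names are illustrative, not from this driver): 'len'
 * must be word aligned, 'offset' must sit on a 16KB sector boundary,
 * and the MFG partition is refused with BFA_STATUS_EINVAL:
 *
 *	status = bfa_flash_update_part(flash, BFA_FLASH_PART_FWIMG, 0,
 *				       img, img_len, 0, my_cbfn, my_arg);
 *
 * On BFA_STATUS_OK the write proceeds chunk by chunk from the
 * BFI_FLASH_I2H_WRITE_RSP handler; my_cbfn() fires once the residue
 * drains or an error comes back.
 */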
4266 
4267 /*
4268  * Read flash partition.
4269  *
4270  * @param[in] flash - flash structure
4271  * @param[in] type - flash partition type
4272  * @param[in] instance - flash partition instance
4273  * @param[in] buf - read data buffer
4274  * @param[in] len - data buffer length
4275  * @param[in] offset - offset relative to the partition starting address
4276  * @param[in] cbfn - callback function
4277  * @param[in] cbarg - callback argument
4278  *
4279  * Return status.
4280  */
4281 bfa_status_t
4282 bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4283 		u8 instance, void *buf, u32 len, u32 offset,
4284 		bfa_cb_flash_t cbfn, void *cbarg)
4285 {
4286 	bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4287 	bfa_trc(flash, type);
4288 	bfa_trc(flash, instance);
4289 	bfa_trc(flash, len);
4290 	bfa_trc(flash, offset);
4291 
4292 	if (!bfa_ioc_is_operational(flash->ioc))
4293 		return BFA_STATUS_IOC_NON_OP;
4294 
4295 	/*
4296 	 * 'len' must be on a word (4-byte) boundary
4297 	 * 'offset' must be on a sector (16KB) boundary
4298 	 */
4299 	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4300 		return BFA_STATUS_FLASH_BAD_LEN;
4301 
4302 	if (flash->op_busy) {
4303 		bfa_trc(flash, flash->op_busy);
4304 		return BFA_STATUS_DEVBUSY;
4305 	}
4306 
4307 	flash->op_busy = 1;
4308 	flash->cbfn = cbfn;
4309 	flash->cbarg = cbarg;
4310 	flash->type = type;
4311 	flash->instance = instance;
4312 	flash->residue = len;
4313 	flash->offset = 0;
4314 	flash->addr_off = offset;
4315 	flash->ubuf = buf;
4316 	bfa_flash_read_send(flash);
4317 
4318 	return BFA_STATUS_OK;
4319 }
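
/*
 * The alignment checks above are simple mask tests: (len & 0x03) is
 * nonzero for any length that is not a multiple of 4, and
 * (offset & 0x00003FFF) is nonzero for any offset that is not a
 * multiple of 0x4000 (16384, one 16KB sector).
 */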
4320 
4321 /*
4322  *	DIAG module specific
4323  */
4324 
4325 #define BFA_DIAG_MEMTEST_TOV	50000	/* memtest timeout in msec */
4326 #define BFA_DIAG_FWPING_TOV	1000	/* msec */
4327 
4328 /* IOC event handler */
4329 static void
4330 bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4331 {
4332 	struct bfa_diag_s *diag = diag_arg;
4333 
4334 	bfa_trc(diag, event);
4335 	bfa_trc(diag, diag->block);
4336 	bfa_trc(diag, diag->fwping.lock);
4337 	bfa_trc(diag, diag->tsensor.lock);
4338 
4339 	switch (event) {
4340 	case BFA_IOC_E_DISABLED:
4341 	case BFA_IOC_E_FAILED:
4342 		if (diag->fwping.lock) {
4343 			diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4344 			diag->fwping.cbfn(diag->fwping.cbarg,
4345 					diag->fwping.status);
4346 			diag->fwping.lock = 0;
4347 		}
4348 
4349 		if (diag->tsensor.lock) {
4350 			diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4351 			diag->tsensor.cbfn(diag->tsensor.cbarg,
4352 					   diag->tsensor.status);
4353 			diag->tsensor.lock = 0;
4354 		}
4355 
4356 		if (diag->block) {
4357 			if (diag->timer_active) {
4358 				bfa_timer_stop(&diag->timer);
4359 				diag->timer_active = 0;
4360 			}
4361 
4362 			diag->status = BFA_STATUS_IOC_FAILURE;
4363 			diag->cbfn(diag->cbarg, diag->status);
4364 			diag->block = 0;
4365 		}
4366 		break;
4367 
4368 	default:
4369 		break;
4370 	}
4371 }
4372 
4373 static void
4374 bfa_diag_memtest_done(void *cbarg)
4375 {
4376 	struct bfa_diag_s *diag = cbarg;
4377 	struct bfa_ioc_s  *ioc = diag->ioc;
4378 	struct bfa_diag_memtest_result *res = diag->result;
4379 	u32	loff = BFI_BOOT_MEMTEST_RES_ADDR;
4380 	u32	pgnum, pgoff, i;
4381 
4382 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4383 	pgoff = PSS_SMEM_PGOFF(loff);
4384 
4385 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4386 
4387 	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4388 			 sizeof(u32)); i++) {
4389 		/* read test result from smem */
4390 		*((u32 *) res + i) =
4391 			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4392 		loff += sizeof(u32);
4393 	}
4394 
4395 	/* Reset IOC fwstates to BFI_IOC_UNINIT */
4396 	bfa_ioc_reset_fwstate(ioc);
4397 
4398 	res->status = swab32(res->status);
4399 	bfa_trc(diag, res->status);
4400 
4401 	if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4402 		diag->status = BFA_STATUS_OK;
4403 	else {
4404 		diag->status = BFA_STATUS_MEMTEST_FAILED;
4405 		res->addr = swab32(res->addr);
4406 		res->exp = swab32(res->exp);
4407 		res->act = swab32(res->act);
4408 		res->err_status = swab32(res->err_status);
4409 		res->err_status1 = swab32(res->err_status1);
4410 		res->err_addr = swab32(res->err_addr);
4411 		bfa_trc(diag, res->addr);
4412 		bfa_trc(diag, res->exp);
4413 		bfa_trc(diag, res->act);
4414 		bfa_trc(diag, res->err_status);
4415 		bfa_trc(diag, res->err_status1);
4416 		bfa_trc(diag, res->err_addr);
4417 	}
4418 	diag->timer_active = 0;
4419 	diag->cbfn(diag->cbarg, diag->status);
4420 	diag->block = 0;
4421 }
4422 
4423 /*
4424  * Firmware ping
4425  */
4426 
4427 /*
4428  * Perform DMA test directly
4429  */
4430 static void
4431 diag_fwping_send(struct bfa_diag_s *diag)
4432 {
4433 	struct bfi_diag_fwping_req_s *fwping_req;
4434 	u32	i;
4435 
4436 	bfa_trc(diag, diag->fwping.dbuf_pa);
4437 
4438 	/* fill DMA area with pattern */
4439 	for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4440 		*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4441 
4442 	/* Fill mbox msg */
4443 	fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4444 
4445 	/* Setup SG list */
4446 	bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4447 			diag->fwping.dbuf_pa);
4448 	/* Set up dma count */
4449 	fwping_req->count = cpu_to_be32(diag->fwping.count);
4450 	/* Set up data pattern */
4451 	fwping_req->data = diag->fwping.data;
4452 
4453 	/* build host command */
4454 	bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4455 		bfa_ioc_portid(diag->ioc));
4456 
4457 	/* send mbox cmd */
4458 	bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4459 }
4460 
4461 static void
4462 diag_fwping_comp(struct bfa_diag_s *diag,
4463 		 struct bfi_diag_fwping_rsp_s *diag_rsp)
4464 {
4465 	u32	rsp_data = diag_rsp->data;
4466 	u8	rsp_dma_status = diag_rsp->dma_status;
4467 
4468 	bfa_trc(diag, rsp_data);
4469 	bfa_trc(diag, rsp_dma_status);
4470 
4471 	if (rsp_dma_status == BFA_STATUS_OK) {
4472 		u32	i, pat;
4473 		pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4474 			diag->fwping.data;
4475 		/* Check mbox data */
4476 		if (diag->fwping.data != rsp_data) {
4477 			bfa_trc(diag, rsp_data);
4478 			diag->fwping.result->dmastatus =
4479 					BFA_STATUS_DATACORRUPTED;
4480 			diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4481 			diag->fwping.cbfn(diag->fwping.cbarg,
4482 					diag->fwping.status);
4483 			diag->fwping.lock = 0;
4484 			return;
4485 		}
4486 		/* Check dma pattern */
4487 		for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4488 			if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4489 				bfa_trc(diag, i);
4490 				bfa_trc(diag, pat);
4491 				bfa_trc(diag,
4492 					*((u32 *)diag->fwping.dbuf_kva + i));
4493 				diag->fwping.result->dmastatus =
4494 						BFA_STATUS_DATACORRUPTED;
4495 				diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4496 				diag->fwping.cbfn(diag->fwping.cbarg,
4497 						diag->fwping.status);
4498 				diag->fwping.lock = 0;
4499 				return;
4500 			}
4501 		}
4502 		diag->fwping.result->dmastatus = BFA_STATUS_OK;
4503 		diag->fwping.status = BFA_STATUS_OK;
4504 		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4505 		diag->fwping.lock = 0;
4506 	} else {
4507 		diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4508 		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4509 		diag->fwping.lock = 0;
4510 	}
4511 }
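
/*
 * Note: the expected pattern checked above depends on ping-count
 * parity (pat = (count & 1) ? ~data : data), which suggests the
 * firmware complements the DMA buffer on each pass, so an odd pass
 * count leaves the inverted pattern in host memory.
 */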
4512 
4513 /*
4514  * Temperature Sensor
4515  */
4516 
4517 static void
4518 diag_tempsensor_send(struct bfa_diag_s *diag)
4519 {
4520 	struct bfi_diag_ts_req_s *msg;
4521 
4522 	msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4523 	bfa_trc(diag, msg->temp);
4524 	/* build host command */
4525 	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4526 		bfa_ioc_portid(diag->ioc));
4527 	/* send mbox cmd */
4528 	bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4529 }
4530 
4531 static void
4532 diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4533 {
4534 	if (!diag->tsensor.lock) {
4535 		/* receiving response after ioc failure */
4536 		bfa_trc(diag, diag->tsensor.lock);
4537 		return;
4538 	}
4539 
4540 	/*
4541 	 * ASIC junction tempsensor is a register read operation;
4542 	 * it will always return OK
4543 	 */
4544 	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4545 	diag->tsensor.temp->ts_junc = rsp->ts_junc;
4546 	diag->tsensor.temp->ts_brd = rsp->ts_brd;
4547 	diag->tsensor.temp->status = BFA_STATUS_OK;
4548 
4549 	if (rsp->ts_brd) {
4550 		if (rsp->status == BFA_STATUS_OK) {
4551 			diag->tsensor.temp->brd_temp =
4552 				be16_to_cpu(rsp->brd_temp);
4553 		} else {
4554 			bfa_trc(diag, rsp->status);
4555 			diag->tsensor.temp->brd_temp = 0;
4556 			diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
4557 		}
4558 	}
4559 	bfa_trc(diag, rsp->ts_junc);
4560 	bfa_trc(diag, rsp->temp);
4561 	bfa_trc(diag, rsp->ts_brd);
4562 	bfa_trc(diag, rsp->brd_temp);
4563 	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4564 	diag->tsensor.lock = 0;
4565 }
4566 
4567 /*
4568  *	LED Test command
4569  */
4570 static void
4571 diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4572 {
4573 	struct bfi_diag_ledtest_req_s  *msg;
4574 
4575 	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4576 	/* build host command */
4577 	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4578 			bfa_ioc_portid(diag->ioc));
4579 
4580 	/*
4581 	 * convert the freq from N blinks per 10 sec to
4582 	 * crossbow ontime value. We do it here because division is needed
4583 	 */
4584 	if (ledtest->freq)
4585 		ledtest->freq = 500 / ledtest->freq;
4586 
4587 	if (ledtest->freq == 0)
4588 		ledtest->freq = 1;
4589 
4590 	bfa_trc(diag, ledtest->freq);
4591 	/* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4592 	msg->cmd = (u8) ledtest->cmd;
4593 	msg->color = (u8) ledtest->color;
4594 	msg->portid = bfa_ioc_portid(diag->ioc);
4595 	msg->led = ledtest->led;
4596 	msg->freq = cpu_to_be16(ledtest->freq);
4597 
4598 	/* send mbox cmd */
4599 	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4600 }
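
/*
 * Worked example of the conversion above: a request for 10 blinks per
 * 10 seconds becomes ontime = 500 / 10 = 50; more than 500 blinks per
 * 10 sec divides down to 0 and is clamped to 1.
 */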
4601 
4602 static void
4603 diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4604 {
4605 	bfa_trc(diag, diag->ledtest.lock);
4606 	diag->ledtest.lock = BFA_FALSE;
4607 	/* no bfa_cb_queue is needed because driver is not waiting */
4608 }
4609 
4610 /*
4611  * Port beaconing
4612  */
4613 static void
4614 diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4615 {
4616 	struct bfi_diag_portbeacon_req_s *msg;
4617 
4618 	msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
4619 	/* build host command */
4620 	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
4621 		bfa_ioc_portid(diag->ioc));
4622 	msg->beacon = beacon;
4623 	msg->period = cpu_to_be32(sec);
4624 	/* send mbox cmd */
4625 	bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
4626 }
4627 
4628 static void
4629 diag_portbeacon_comp(struct bfa_diag_s *diag)
4630 {
4631 	bfa_trc(diag, diag->beacon.state);
4632 	diag->beacon.state = BFA_FALSE;
4633 	if (diag->cbfn_beacon)
4634 		diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
4635 }
4636 
4637 /*
4638  *	Diag hmbox handler
4639  */
4640 void
4641 bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
4642 {
4643 	struct bfa_diag_s *diag = diagarg;
4644 
4645 	switch (msg->mh.msg_id) {
4646 	case BFI_DIAG_I2H_PORTBEACON:
4647 		diag_portbeacon_comp(diag);
4648 		break;
4649 	case BFI_DIAG_I2H_FWPING:
4650 		diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
4651 		break;
4652 	case BFI_DIAG_I2H_TEMPSENSOR:
4653 		diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
4654 		break;
4655 	case BFI_DIAG_I2H_LEDTEST:
4656 		diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
4657 		break;
4658 	default:
4659 		bfa_trc(diag, msg->mh.msg_id);
4660 		WARN_ON(1);
4661 	}
4662 }
4663 
4664 /*
4665  * Gen RAM Test
4666  *
4667  *   @param[in] *diag           - diag data struct
4668  *   @param[in] *memtest        - mem test params input from upper layer
4669  *   @param[in] pattern         - mem test pattern
4670  *   @param[in] *result         - mem test result
4671  *   @param[in] cbfn            - mem test callback function
4672  *   @param[in] cbarg           - callback function arg
4673  *
4674  *   @param[out]
4675  */
4676 bfa_status_t
4677 bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
4678 		u32 pattern, struct bfa_diag_memtest_result *result,
4679 		bfa_cb_diag_t cbfn, void *cbarg)
4680 {
4681 	bfa_trc(diag, pattern);
4682 
4683 	if (!bfa_ioc_adapter_is_disabled(diag->ioc))
4684 		return BFA_STATUS_ADAPTER_ENABLED;
4685 
4686 	/* check to see if there is another destructive diag cmd running */
4687 	if (diag->block) {
4688 		bfa_trc(diag, diag->block);
4689 		return BFA_STATUS_DEVBUSY;
4690 	} else
4691 		diag->block = 1;
4692 
4693 	diag->result = result;
4694 	diag->cbfn = cbfn;
4695 	diag->cbarg = cbarg;
4696 
4697 	/* download memtest code and take LPU0 out of reset */
4698 	bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
4699 
4700 	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
4701 			bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
4702 	diag->timer_active = 1;
4703 	return BFA_STATUS_OK;
4704 }
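
/*
 * Illustrative flow (my_done_cbfn/my_arg are placeholder names): the
 * adapter must already be disabled, since memtest boots dedicated
 * firmware in place of the normal image.
 *
 *	rc = bfa_diag_memtest(diag, &memtest, pattern, &result,
 *			      my_done_cbfn, my_arg);
 *
 * On BFA_STATUS_OK, my_done_cbfn() runs from bfa_diag_memtest_done()
 * once the BFA_DIAG_MEMTEST_TOV (50 sec) timer expires and the result
 * block has been read back from shared memory.
 */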
4705 
4706 /*
4707  * DIAG firmware ping command
4708  *
4709  *   @param[in] *diag           - diag data struct
4710  *   @param[in] cnt             - dma loop count for testing PCIE
4711  *   @param[in] data            - data pattern to pass in fw
4712  *   @param[in] *result         - ptr to bfa_diag_fwping_result_t data struct
4713  *   @param[in] cbfn            - callback function
4714  *   @param[in] *cbarg          - callback function arg
4715  *
4716  *   @param[out]
4717  */
4718 bfa_status_t
4719 bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
4720 		struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
4721 		void *cbarg)
4722 {
4723 	bfa_trc(diag, cnt);
4724 	bfa_trc(diag, data);
4725 
4726 	if (!bfa_ioc_is_operational(diag->ioc))
4727 		return BFA_STATUS_IOC_NON_OP;
4728 
4729 	if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
4730 	    ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
4731 		return BFA_STATUS_CMD_NOTSUPP;
4732 
4733 	/* check to see if there is another destructive diag cmd running */
4734 	if (diag->block || diag->fwping.lock) {
4735 		bfa_trc(diag, diag->block);
4736 		bfa_trc(diag, diag->fwping.lock);
4737 		return BFA_STATUS_DEVBUSY;
4738 	}
4739 
4740 	/* Initialization */
4741 	diag->fwping.lock = 1;
4742 	diag->fwping.cbfn = cbfn;
4743 	diag->fwping.cbarg = cbarg;
4744 	diag->fwping.result = result;
4745 	diag->fwping.data = data;
4746 	diag->fwping.count = cnt;
4747 
4748 	/* Init test results */
4749 	diag->fwping.result->data = 0;
4750 	diag->fwping.result->status = BFA_STATUS_OK;
4751 
4752 	/* kick off the first ping */
4753 	diag_fwping_send(diag);
4754 	return BFA_STATUS_OK;
4755 }
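
/*
 * Usage sketch (my_cbfn/my_arg are placeholders): ping the firmware
 * eight times, DMA-ing a 0xa5a5a5a5-filled buffer each pass:
 *
 *	bfa_diag_fwping(diag, 8, 0xa5a5a5a5, &result, my_cbfn, my_arg);
 *
 * my_cbfn() eventually receives BFA_STATUS_OK, BFA_STATUS_DATACORRUPTED
 * (mailbox or DMA pattern mismatch) or BFA_STATUS_HDMA_FAILED from
 * diag_fwping_comp().
 */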
4756 
4757 /*
4758  * Read Temperature Sensor
4759  *
4760  *   @param[in] *diag           - diag data struct
4761  *   @param[in] *result         - ptr to bfa_diag_temp_t data struct
4762  *   @param[in] cbfn            - callback function
4763  *   @param[in] *cbarg          - callback function arg
4764  *
4765  *   @param[out]
4766  */
4767 bfa_status_t
4768 bfa_diag_tsensor_query(struct bfa_diag_s *diag,
4769 		struct bfa_diag_results_tempsensor_s *result,
4770 		bfa_cb_diag_t cbfn, void *cbarg)
4771 {
4772 	/* check to see if there is a destructive diag cmd running */
4773 	if (diag->block || diag->tsensor.lock) {
4774 		bfa_trc(diag, diag->block);
4775 		bfa_trc(diag, diag->tsensor.lock);
4776 		return BFA_STATUS_DEVBUSY;
4777 	}
4778 
4779 	if (!bfa_ioc_is_operational(diag->ioc))
4780 		return BFA_STATUS_IOC_NON_OP;
4781 
4782 	/* Init diag mod params */
4783 	diag->tsensor.lock = 1;
4784 	diag->tsensor.temp = result;
4785 	diag->tsensor.cbfn = cbfn;
4786 	diag->tsensor.cbarg = cbarg;
4787 
4788 	/* Send msg to fw */
4789 	diag_tempsensor_send(diag);
4790 
4791 	return BFA_STATUS_OK;
4792 }
4793 
4794 /*
4795  * LED Test command
4796  *
4797  *   @param[in] *diag           - diag data struct
4798  *   @param[in] *ledtest        - ptr to ledtest data structure
4799  *
4800  *   @param[out]
4801  */
4802 bfa_status_t
4803 bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4804 {
4805 	bfa_trc(diag, ledtest->cmd);
4806 
4807 	if (!bfa_ioc_is_operational(diag->ioc))
4808 		return BFA_STATUS_IOC_NON_OP;
4809 
4810 	if (diag->beacon.state)
4811 		return BFA_STATUS_BEACON_ON;
4812 
4813 	if (diag->ledtest.lock)
4814 		return BFA_STATUS_LEDTEST_OP;
4815 
4816 	/* Send msg to fw */
4817 	diag->ledtest.lock = BFA_TRUE;
4818 	diag_ledtest_send(diag, ledtest);
4819 
4820 	return BFA_STATUS_OK;
4821 }
4822 
4823 /*
4824  * Port beaconing command
4825  *
4826  *   @param[in] *diag           - diag data struct
4827  *   @param[in] beacon          - port beaconing 1:ON   0:OFF
4828  *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
4829  *   @param[in] sec             - beaconing duration in seconds
4830  *
4831  *   @param[out]
4832  */
4833 bfa_status_t
4834 bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
4835 		bfa_boolean_t link_e2e_beacon, uint32_t sec)
4836 {
4837 	bfa_trc(diag, beacon);
4838 	bfa_trc(diag, link_e2e_beacon);
4839 	bfa_trc(diag, sec);
4840 
4841 	if (!bfa_ioc_is_operational(diag->ioc))
4842 		return BFA_STATUS_IOC_NON_OP;
4843 
4844 	if (diag->ledtest.lock)
4845 		return BFA_STATUS_LEDTEST_OP;
4846 
4847 	if (diag->beacon.state && beacon)       /* beacon already on */
4848 		return BFA_STATUS_BEACON_ON;
4849 
4850 	diag->beacon.state	= beacon;
4851 	diag->beacon.link_e2e	= link_e2e_beacon;
4852 	if (diag->cbfn_beacon)
4853 		diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
4854 
4855 	/* Send msg to fw */
4856 	diag_portbeacon_send(diag, beacon, sec);
4857 
4858 	return BFA_STATUS_OK;
4859 }
4860 
4861 /*
4862  * Return DMA memory needed by diag module.
4863  */
4864 u32
4865 bfa_diag_meminfo(void)
4866 {
4867 	return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4868 }
4869 
4870 /*
4871  *	Attach virtual and physical memory for Diag.
4872  */
4873 void
4874 bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
4875 	bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
4876 {
4877 	diag->dev = dev;
4878 	diag->ioc = ioc;
4879 	diag->trcmod = trcmod;
4880 
4881 	diag->block = 0;
4882 	diag->cbfn = NULL;
4883 	diag->cbarg = NULL;
4884 	diag->result = NULL;
4885 	diag->cbfn_beacon = cbfn_beacon;
4886 
4887 	bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
4888 	bfa_q_qe_init(&diag->ioc_notify);
4889 	bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
4890 	list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
4891 }
4892 
4893 void
4894 bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
4895 {
4896 	diag->fwping.dbuf_kva = dm_kva;
4897 	diag->fwping.dbuf_pa = dm_pa;
4898 	memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
4899 }
4900 
4901 /*
4902  *	PHY module specific
4903  */
4904 #define BFA_PHY_DMA_BUF_SZ	0x02000         /* 8k dma buffer */
4905 #define BFA_PHY_LOCK_STATUS	0x018878        /* phy semaphore status reg */
4906 
4907 static void
4908 bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
4909 {
4910 	int i, m = sz >> 2;
4911 
4912 	for (i = 0; i < m; i++)
4913 		obuf[i] = be32_to_cpu(ibuf[i]);
4914 }
4915 
4916 static bfa_boolean_t
4917 bfa_phy_present(struct bfa_phy_s *phy)
4918 {
4919 	return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
4920 }
4921 
4922 static void
4923 bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
4924 {
4925 	struct bfa_phy_s *phy = cbarg;
4926 
4927 	bfa_trc(phy, event);
4928 
4929 	switch (event) {
4930 	case BFA_IOC_E_DISABLED:
4931 	case BFA_IOC_E_FAILED:
4932 		if (phy->op_busy) {
4933 			phy->status = BFA_STATUS_IOC_FAILURE;
4934 			phy->cbfn(phy->cbarg, phy->status);
4935 			phy->op_busy = 0;
4936 		}
4937 		break;
4938 
4939 	default:
4940 		break;
4941 	}
4942 }
4943 
4944 /*
4945  * Send phy attribute query request.
4946  *
4947  * @param[in] cbarg - callback argument
4948  */
4949 static void
4950 bfa_phy_query_send(void *cbarg)
4951 {
4952 	struct bfa_phy_s *phy = cbarg;
4953 	struct bfi_phy_query_req_s *msg =
4954 			(struct bfi_phy_query_req_s *) phy->mb.msg;
4955 
4956 	msg->instance = phy->instance;
4957 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
4958 		bfa_ioc_portid(phy->ioc));
4959 	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
4960 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
4961 }
4962 
4963 /*
4964  * Send phy write request.
4965  *
4966  * @param[in] cbarg - callback argument
4967  */
4968 static void
4969 bfa_phy_write_send(void *cbarg)
4970 {
4971 	struct bfa_phy_s *phy = cbarg;
4972 	struct bfi_phy_write_req_s *msg =
4973 			(struct bfi_phy_write_req_s *) phy->mb.msg;
4974 	u32	len;
4975 	u16	*buf, *dbuf;
4976 	int	i, sz;
4977 
4978 	msg->instance = phy->instance;
4979 	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
4980 	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
4981 			phy->residue : BFA_PHY_DMA_BUF_SZ;
4982 	msg->length = cpu_to_be32(len);
4983 
4984 	/* indicate if it's the last msg of the whole write operation */
4985 	msg->last = (len == phy->residue) ? 1 : 0;
4986 
4987 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
4988 		bfa_ioc_portid(phy->ioc));
4989 	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
4990 
4991 	buf = (u16 *) (phy->ubuf + phy->offset);
4992 	dbuf = (u16 *)phy->dbuf_kva;
4993 	sz = len >> 1;
4994 	for (i = 0; i < sz; i++)
4995 		dbuf[i] = cpu_to_be16(buf[i]);	/* fill DMA buf from user data */
4996 
4997 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
4998 
4999 	phy->residue -= len;
5000 	phy->offset += len;
5001 }
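
/*
 * Chunk sizing above is effectively len = min(residue, BFA_PHY_DMA_BUF_SZ),
 * i.e. at most 8KB per mailbox round trip; 'msg->last' is set only on
 * the chunk that drains the residue, telling the firmware the write is
 * complete.
 */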
5002 
5003 /*
5004  * Send phy read request.
5005  *
5006  * @param[in] cbarg - callback argument
5007  */
5008 static void
5009 bfa_phy_read_send(void *cbarg)
5010 {
5011 	struct bfa_phy_s *phy = cbarg;
5012 	struct bfi_phy_read_req_s *msg =
5013 			(struct bfi_phy_read_req_s *) phy->mb.msg;
5014 	u32	len;
5015 
5016 	msg->instance = phy->instance;
5017 	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5018 	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5019 			phy->residue : BFA_PHY_DMA_BUF_SZ;
5020 	msg->length = cpu_to_be32(len);
5021 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5022 		bfa_ioc_portid(phy->ioc));
5023 	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5024 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5025 }
5026 
5027 /*
5028  * Send phy stats request.
5029  *
5030  * @param[in] cbarg - callback argument
5031  */
5032 static void
5033 bfa_phy_stats_send(void *cbarg)
5034 {
5035 	struct bfa_phy_s *phy = cbarg;
5036 	struct bfi_phy_stats_req_s *msg =
5037 			(struct bfi_phy_stats_req_s *) phy->mb.msg;
5038 
5039 	msg->instance = phy->instance;
5040 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5041 		bfa_ioc_portid(phy->ioc));
5042 	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5043 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5044 }
5045 
5046 /*
5047  * Phy memory info API.
5048  *
5049  * @param[in] mincfg - minimal cfg variable
5050  */
5051 u32
5052 bfa_phy_meminfo(bfa_boolean_t mincfg)
5053 {
5054 	/* min driver doesn't need phy */
5055 	if (mincfg)
5056 		return 0;
5057 
5058 	return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5059 }
5060 
5061 /*
5062  * Phy attach API.
5063  *
5064  * @param[in] phy - phy structure
5065  * @param[in] ioc  - ioc structure
5066  * @param[in] dev  - device structure
5067  * @param[in] trcmod - trace module
5068  * @param[in] mincfg - minimal cfg variable
5069  */
5070 void
5071 bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5072 		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5073 {
5074 	phy->ioc = ioc;
5075 	phy->trcmod = trcmod;
5076 	phy->cbfn = NULL;
5077 	phy->cbarg = NULL;
5078 	phy->op_busy = 0;
5079 
5080 	bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5081 	bfa_q_qe_init(&phy->ioc_notify);
5082 	bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5083 	list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5084 
5085 	/* min driver doesn't need phy */
5086 	if (mincfg) {
5087 		phy->dbuf_kva = NULL;
5088 		phy->dbuf_pa = 0;
5089 	}
5090 }
5091 
5092 /*
5093  * Claim memory for phy
5094  *
5095  * @param[in] phy - phy structure
5096  * @param[in] dm_kva - pointer to virtual memory address
5097  * @param[in] dm_pa - physical memory address
5098  * @param[in] mincfg - minimal cfg variable
5099  */
5100 void
5101 bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5102 		bfa_boolean_t mincfg)
5103 {
5104 	if (mincfg)
5105 		return;
5106 
5107 	phy->dbuf_kva = dm_kva;
5108 	phy->dbuf_pa = dm_pa;
5109 	memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5110 	dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5111 	dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5112 }
5113 
5114 bfa_boolean_t
5115 bfa_phy_busy(struct bfa_ioc_s *ioc)
5116 {
5117 	void __iomem	*rb;
5118 
5119 	rb = bfa_ioc_bar0(ioc);
5120 	return readl(rb + BFA_PHY_LOCK_STATUS);
5121 }
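
/*
 * bfa_phy_busy() reads the phy semaphore status register directly; any
 * nonzero value means some other entity holds the phy, and the entry
 * points below bounce the request with BFA_STATUS_DEVBUSY.
 */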
5122 
5123 /*
5124  * Get phy attribute.
5125  *
5126  * @param[in] phy - phy structure
5127  * @param[in] attr - phy attribute structure
5128  * @param[in] cbfn - callback function
5129  * @param[in] cbarg - callback argument
5130  *
5131  * Return status.
5132  */
5133 bfa_status_t
5134 bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5135 		struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5136 {
5137 	bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5138 	bfa_trc(phy, instance);
5139 
5140 	if (!bfa_phy_present(phy))
5141 		return BFA_STATUS_PHY_NOT_PRESENT;
5142 
5143 	if (!bfa_ioc_is_operational(phy->ioc))
5144 		return BFA_STATUS_IOC_NON_OP;
5145 
5146 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5147 		bfa_trc(phy, phy->op_busy);
5148 		return BFA_STATUS_DEVBUSY;
5149 	}
5150 
5151 	phy->op_busy = 1;
5152 	phy->cbfn = cbfn;
5153 	phy->cbarg = cbarg;
5154 	phy->instance = instance;
5155 	phy->ubuf = (uint8_t *) attr;
5156 	bfa_phy_query_send(phy);
5157 
5158 	return BFA_STATUS_OK;
5159 }
5160 
5161 /*
5162  * Get phy stats.
5163  *
5164  * @param[in] phy - phy structure
5165  * @param[in] instance - phy image instance
5166  * @param[in] stats - pointer to phy stats
5167  * @param[in] cbfn - callback function
5168  * @param[in] cbarg - callback argument
5169  *
5170  * Return status.
5171  */
5172 bfa_status_t
5173 bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5174 		struct bfa_phy_stats_s *stats,
5175 		bfa_cb_phy_t cbfn, void *cbarg)
5176 {
5177 	bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5178 	bfa_trc(phy, instance);
5179 
5180 	if (!bfa_phy_present(phy))
5181 		return BFA_STATUS_PHY_NOT_PRESENT;
5182 
5183 	if (!bfa_ioc_is_operational(phy->ioc))
5184 		return BFA_STATUS_IOC_NON_OP;
5185 
5186 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5187 		bfa_trc(phy, phy->op_busy);
5188 		return BFA_STATUS_DEVBUSY;
5189 	}
5190 
5191 	phy->op_busy = 1;
5192 	phy->cbfn = cbfn;
5193 	phy->cbarg = cbarg;
5194 	phy->instance = instance;
5195 	phy->ubuf = (u8 *) stats;
5196 	bfa_phy_stats_send(phy);
5197 
5198 	return BFA_STATUS_OK;
5199 }
5200 
5201 /*
5202  * Update phy image.
5203  *
5204  * @param[in] phy - phy structure
5205  * @param[in] instance - phy image instance
5206  * @param[in] buf - update data buffer
5207  * @param[in] len - data buffer length
5208  * @param[in] offset - offset relative to starting address
5209  * @param[in] cbfn - callback function
5210  * @param[in] cbarg - callback argument
5211  *
5212  * Return status.
5213  */
5214 bfa_status_t
5215 bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5216 		void *buf, u32 len, u32 offset,
5217 		bfa_cb_phy_t cbfn, void *cbarg)
5218 {
5219 	bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5220 	bfa_trc(phy, instance);
5221 	bfa_trc(phy, len);
5222 	bfa_trc(phy, offset);
5223 
5224 	if (!bfa_phy_present(phy))
5225 		return BFA_STATUS_PHY_NOT_PRESENT;
5226 
5227 	if (!bfa_ioc_is_operational(phy->ioc))
5228 		return BFA_STATUS_IOC_NON_OP;
5229 
5230 	/* 'len' must be in word (4-byte) boundary */
5231 	/* 'len' must be on a word (4-byte) boundary */
5232 		return BFA_STATUS_FAILED;
5233 
5234 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5235 		bfa_trc(phy, phy->op_busy);
5236 		return BFA_STATUS_DEVBUSY;
5237 	}
5238 
5239 	phy->op_busy = 1;
5240 	phy->cbfn = cbfn;
5241 	phy->cbarg = cbarg;
5242 	phy->instance = instance;
5243 	phy->residue = len;
5244 	phy->offset = 0;
5245 	phy->addr_off = offset;
5246 	phy->ubuf = buf;
5247 
5248 	bfa_phy_write_send(phy);
5249 	return BFA_STATUS_OK;
5250 }
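
/*
 * Usage sketch (inst/fw_img/verify_buf/my_cbfn are placeholder names):
 * update a phy image and read it back for verification.
 *
 *	bfa_phy_update(phy, inst, fw_img, img_len, 0, my_cbfn, my_arg);
 *	... after my_cbfn() reports BFA_STATUS_OK ...
 *	bfa_phy_read(phy, inst, verify_buf, img_len, 0, my_cbfn, my_arg);
 *
 * Both directions stream through the same 8KB DMA buffer and complete
 * from bfa_phy_intr().
 */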
5251 
5252 /*
5253  * Read phy image.
5254  *
5255  * @param[in] phy - phy structure
5256  * @param[in] instance - phy image instance
5257  * @param[in] buf - read data buffer
5258  * @param[in] len - data buffer length
5259  * @param[in] offset - offset relative to starting address
5260  * @param[in] cbfn - callback function
5261  * @param[in] cbarg - callback argument
5262  *
5263  * Return status.
5264  */
5265 bfa_status_t
5266 bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5267 		void *buf, u32 len, u32 offset,
5268 		bfa_cb_phy_t cbfn, void *cbarg)
5269 {
5270 	bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5271 	bfa_trc(phy, instance);
5272 	bfa_trc(phy, len);
5273 	bfa_trc(phy, offset);
5274 
5275 	if (!bfa_phy_present(phy))
5276 		return BFA_STATUS_PHY_NOT_PRESENT;
5277 
5278 	if (!bfa_ioc_is_operational(phy->ioc))
5279 		return BFA_STATUS_IOC_NON_OP;
5280 
5281 	/* 'len' must be in word (4-byte) boundary */
5282 	/* 'len' must be on a word (4-byte) boundary */
5283 		return BFA_STATUS_FAILED;
5284 
5285 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5286 		bfa_trc(phy, phy->op_busy);
5287 		return BFA_STATUS_DEVBUSY;
5288 	}
5289 
5290 	phy->op_busy = 1;
5291 	phy->cbfn = cbfn;
5292 	phy->cbarg = cbarg;
5293 	phy->instance = instance;
5294 	phy->residue = len;
5295 	phy->offset = 0;
5296 	phy->addr_off = offset;
5297 	phy->ubuf = buf;
5298 	bfa_phy_read_send(phy);
5299 
5300 	return BFA_STATUS_OK;
5301 }
5302 
5303 /*
5304  * Process phy response messages upon receiving interrupts.
5305  *
5306  * @param[in] phyarg - phy structure
5307  * @param[in] msg - message structure
5308  */
5309 void
5310 bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5311 {
5312 	struct bfa_phy_s *phy = phyarg;
5313 	u32	status;
5314 
5315 	union {
5316 		struct bfi_phy_query_rsp_s *query;
5317 		struct bfi_phy_stats_rsp_s *stats;
5318 		struct bfi_phy_write_rsp_s *write;
5319 		struct bfi_phy_read_rsp_s *read;
5320 		struct bfi_mbmsg_s   *msg;
5321 	} m;
5322 
5323 	m.msg = msg;
5324 	bfa_trc(phy, msg->mh.msg_id);
5325 
5326 	if (!phy->op_busy) {
5327 		/* receiving response after ioc failure */
5328 		bfa_trc(phy, 0x9999);
5329 		return;
5330 	}
5331 
5332 	switch (msg->mh.msg_id) {
5333 	case BFI_PHY_I2H_QUERY_RSP:
5334 		status = be32_to_cpu(m.query->status);
5335 		bfa_trc(phy, status);
5336 
5337 		if (status == BFA_STATUS_OK) {
5338 			struct bfa_phy_attr_s *attr =
5339 				(struct bfa_phy_attr_s *) phy->ubuf;
5340 			bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5341 					sizeof(struct bfa_phy_attr_s));
5342 			bfa_trc(phy, attr->status);
5343 			bfa_trc(phy, attr->length);
5344 		}
5345 
5346 		phy->status = status;
5347 		phy->op_busy = 0;
5348 		if (phy->cbfn)
5349 			phy->cbfn(phy->cbarg, phy->status);
5350 		break;
5351 	case BFI_PHY_I2H_STATS_RSP:
5352 		status = be32_to_cpu(m.stats->status);
5353 		bfa_trc(phy, status);
5354 
5355 		if (status == BFA_STATUS_OK) {
5356 			struct bfa_phy_stats_s *stats =
5357 				(struct bfa_phy_stats_s *) phy->ubuf;
5358 			bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5359 				sizeof(struct bfa_phy_stats_s));
5360 			bfa_trc(phy, stats->status);
5361 		}
5362 
5363 		phy->status = status;
5364 		phy->op_busy = 0;
5365 		if (phy->cbfn)
5366 			phy->cbfn(phy->cbarg, phy->status);
5367 		break;
5368 	case BFI_PHY_I2H_WRITE_RSP:
5369 		status = be32_to_cpu(m.write->status);
5370 		bfa_trc(phy, status);
5371 
5372 		if (status != BFA_STATUS_OK || phy->residue == 0) {
5373 			phy->status = status;
5374 			phy->op_busy = 0;
5375 			if (phy->cbfn)
5376 				phy->cbfn(phy->cbarg, phy->status);
5377 		} else {
5378 			bfa_trc(phy, phy->offset);
5379 			bfa_phy_write_send(phy);
5380 		}
5381 		break;
5382 	case BFI_PHY_I2H_READ_RSP:
5383 		status = be32_to_cpu(m.read->status);
5384 		bfa_trc(phy, status);
5385 
5386 		if (status != BFA_STATUS_OK) {
5387 			phy->status = status;
5388 			phy->op_busy = 0;
5389 			if (phy->cbfn)
5390 				phy->cbfn(phy->cbarg, phy->status);
5391 		} else {
5392 			u32 len = be32_to_cpu(m.read->length);
5393 			u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5394 			u16 *dbuf = (u16 *)phy->dbuf_kva;
5395 			int i, sz = len >> 1;
5396 
5397 			bfa_trc(phy, phy->offset);
5398 			bfa_trc(phy, len);
5399 
5400 			for (i = 0; i < sz; i++)
5401 				buf[i] = be16_to_cpu(dbuf[i]);
5402 
5403 			phy->residue -= len;
5404 			phy->offset += len;
5405 
5406 			if (phy->residue == 0) {
5407 				phy->status = status;
5408 				phy->op_busy = 0;
5409 				if (phy->cbfn)
5410 					phy->cbfn(phy->cbarg, phy->status);
5411 			} else
5412 				bfa_phy_read_send(phy);
5413 		}
5414 		break;
5415 	default:
5416 		WARN_ON(1);
5417 	}
5418 }
5419