/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"
#include "bfi.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	((__sm) == BFI_IOC_INITING) ||		\
	((__sm) == BFI_IOC_HWINIT) ||		\
	((__sm) == BFI_IOC_DISABLED) ||		\
	((__sm) == BFI_IOC_FAIL) ||		\
	((__sm) == BFI_IOC_CFG_DISABLED))

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)              \
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)               \
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)                \
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)               \
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)                 \
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)            \
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)		\
			((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
			((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
			((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
				enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp(
				struct bfi_ioc_image_hdr_s *base_fwhdr,
				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp);
static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(
				struct bfa_ioc_s *ioc,
				struct bfi_ioc_image_hdr_s *base_fwhdr);

/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/*  IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*  IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*  IOC disable request		*/
	IOC_E_DETACH		= 4,	/*  driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*  f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*  failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*  heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
	IOC_E_HWFAILED		= 12,	/*  PCI mapping failure notice	*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*  IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*  IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*  stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*  f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*  enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*  disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*  failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*  init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*  init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*  h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*  h/w sem mapping error	*/
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*  IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/*  Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*  IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*  IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/*  IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/*  IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/*  IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/*  IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/*  IOC f/w different from driver's */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

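/*
 * Enabling state entry -- forward the enable request to the IOCPF
 * state machine.
 */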
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

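/*
 * Operational state entry -- report enable completion, notify
 * registered modules and start heartbeat monitoring.
 */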
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

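/*
 * IOC is operational. Heartbeat timer is running.
 */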
static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change.  Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

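/*
 * IOC is disabled.
 */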
static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
	case IOC_E_HWFAILED:
		/*
		 * HB failure / HW error notification, ignore.
		 */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

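/*
 * IOC is in an unrecoverable hardware failure state; only
 * IOC_E_DETACH changes state (back to uninit).
 */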
static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	case IOC_E_HWERROR:
		/* Ignore - already in hwfail state */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->fw_mismatch_notified = BFA_FALSE;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	r32, fwstate, pgnum, pgoff, loff = 0;
	int	i;

	/*
	 * Spin on init semaphore to serialize.
	 */
	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	}

	/* h/w sem init */
	fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	/*
	 * Clear fwver hdr
	 */
	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);
	}

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
	bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
	bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);

	/*
	 * Unlock the hw semaphore. Should be here only once per boot.
	 */
	bfa_ioc_ownership_reset(iocpf->ioc);

	/*
	 * unlock init semaphore.
	 */
	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

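/*
 * Hardware init state entry -- reset the fw-init poll count and
 * start h/w initialization.
 */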
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/* fall through */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

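/*
 * Ready state entry -- report IOC enable completion to the IOC
 * state machine.
 */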
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/* fall through */

	case IOCPF_E_TIMEOUT:
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 *  BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
	struct bfa_ioc_notify_s	*notify;
	struct list_head	*qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify_s *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

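/*
 * Try to acquire a h/w semaphore, busy-waiting up to
 * BFA_SEM_SPINCNT * 2 usecs (~6 msecs) for it to become free.
 */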
bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return BFA_TRUE;

	return BFA_FALSE;
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register.
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		WARN_ON(r32 == ~0);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}

/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int		i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int		i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}

/*
 * Returns TRUE if driver is willing to work with current smem f/w version.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
		struct bfi_ioc_image_hdr_s *smem_fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	/*
	 * If smem is incompatible or old, driver should not work with it.
	 */
	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr);
	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
		drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
		return BFA_FALSE;
	}

	/*
	 * If flash has a better f/w than smem, do not work with smem.
	 * If smem f/w == flash f/w, work with it (smem f/w is neither
	 * old nor incompatible at this point).
	 * If flash is old or incompatible, work with smem iff
	 * smem f/w == drv f/w.
	 */
	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr);

	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) {
		return BFA_FALSE;
	} else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) {
		return BFA_TRUE;
	} else {
		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
			BFA_TRUE : BFA_FALSE;
	}
}

/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.bootenv) != boot_env) {
		bfa_trc(ioc, fwhdr.bootenv);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

static bfa_boolean_t
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1,
				struct bfi_ioc_image_hdr_s *fwhdr_2)
{
	int i;

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++)
		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
			return BFA_FALSE;

	return BFA_TRUE;
}

/*
 * Returns TRUE if the major, minor, and maintenance versions are the same.
 * If the patch versions are also the same, the MD5 checksums must match.
 */
static bfa_boolean_t
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr,
				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
{
	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
		drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
		drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) {
		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
	}

	return BFA_TRUE;
}

static bfa_boolean_t
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr)
{
	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
		return BFA_FALSE;

	return BFA_TRUE;
}

static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr)
{
	if (fwhdr->fwver.phase == 0 &&
		fwhdr->fwver.build == 0)
		return BFA_TRUE;

	return BFA_FALSE;
}

/*
 * Compare fwhdr_to_cmp against base_fwhdr and return whether it is the
 * same, better (newer), older, or incompatible.
 */
static enum bfi_ioc_img_ver_cmp_e
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr,
				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
{
	if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE)
		return BFI_IOC_IMG_VER_INCOMP;

	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_BETTER;

	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_OLD;

	/*
	 * GA takes priority over internal builds of the same patch stream.
	 * At this point the major, minor, maint, and patch numbers are the same.
	 */

	if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_SAME;
		else
			return BFI_IOC_IMG_VER_OLD;
	} else {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_BETTER;
	}

	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_OLD;

	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_OLD;

	/*
	 * All version numbers are equal; the MD5 check has already been
	 * done as part of the compatibility check.
	 */
	return BFI_IOC_IMG_VER_SAME;
}

#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */

bfa_status_t
bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
				u32 *fwimg)
{
	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
}

static enum bfi_ioc_img_ver_cmp_e
bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc,
			struct bfi_ioc_image_hdr_s *base_fwhdr)
{
	struct bfi_ioc_image_hdr_s *flash_fwhdr;
	bfa_status_t status;
	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];

	status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg);
	if (status != BFA_STATUS_OK)
		return BFI_IOC_IMG_VER_INCOMP;

	flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg;
	if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE)
		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
	else
		return BFI_IOC_IMG_VER_INCOMP;
}

/*
 * Invalidate fwver signature
 */
bfa_status_t
bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	enum bfi_ioc_state ioc_fwstate;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
	if (!bfa_ioc_state_disabled(ioc_fwstate))
		return BFA_STATUS_ADAPTER_ENABLED;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
	bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN);

	return BFA_STATUS_OK;
}

/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

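/*
 * Initialize IOC hardware: boot (or re-boot) firmware unless a valid,
 * compatible image is already initializing or running.
 */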
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_FWBOOT_TYPE_NORMAL;
	boot_env = BFI_FWBOOT_ENV_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
			bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option ROM, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
		bfa_ioc_poll_fwinit(ioc);
}

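/*
 * IOC state machine timer callback.
 */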
static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

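/*
 * Write a message (up to BFI_IOC_MSGLEN_MAX bytes) to the host-to-LPU
 * mailbox and ring the mailbox doorbell.
 */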
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}

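/*
 * Send an IOC enable request to firmware.
 */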
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = cpu_to_be16(ioc->clscode);
	/* unsigned 32-bit time_t overflow in y2106 */
	enable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	disable_req.clscode = cpu_to_be16(ioc->clscode);
	/* unsigned 32-bit time_t overflow in y2106 */
	disable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s	attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

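/*
 * Heartbeat timer callback -- start recovery if the heartbeat counter
 * has not advanced since the last check.
 */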
static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s  *ioc = cbarg;
	u32	hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_hb_timer_start(ioc);
}

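/*
 * Latch the current heartbeat count and start the heartbeat timer.
 */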
1859 static void
1860 bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1861 {
1862 	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1863 	bfa_hb_timer_start(ioc);
1864 }
1865 
1866 /*
1867  *	Initiate a full firmware download.
1868  */
1869 static bfa_status_t
1870 bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1871 		    u32 boot_env)
1872 {
1873 	u32 *fwimg;
1874 	u32 pgnum, pgoff;
1875 	u32 loff = 0;
1876 	u32 chunkno = 0;
1877 	u32 i;
1878 	u32 asicmode;
1879 	u32 fwimg_size;
1880 	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
1881 	bfa_status_t status;
1882 
1883 	if (boot_env == BFI_FWBOOT_ENV_OS &&
1884 		boot_type == BFI_FWBOOT_TYPE_FLASH) {
1885 		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
1886 
1887 		status = bfa_ioc_flash_img_get_chnk(ioc,
1888 			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
1889 		if (status != BFA_STATUS_OK)
1890 			return status;
1891 
1892 		fwimg = fwimg_buf;
1893 	} else {
1894 		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
1895 		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1896 					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1897 	}
1898 
1899 	bfa_trc(ioc, fwimg_size);
1900 
1901 
1902 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1903 	pgoff = PSS_SMEM_PGOFF(loff);
1904 
1905 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1906 
1907 	for (i = 0; i < fwimg_size; i++) {
1908 
1909 		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1910 			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1911 
1912 			if (boot_env == BFI_FWBOOT_ENV_OS &&
1913 				boot_type == BFI_FWBOOT_TYPE_FLASH) {
1914 				status = bfa_ioc_flash_img_get_chnk(ioc,
1915 					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
1916 					fwimg_buf);
1917 				if (status != BFA_STATUS_OK)
1918 					return status;
1919 
1920 				fwimg = fwimg_buf;
1921 			} else {
1922 				fwimg = bfa_cb_image_get_chunk(
1923 					bfa_ioc_asic_gen(ioc),
1924 					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1925 			}
1926 		}
1927 
1928 		/*
1929 		 * write smem
1930 		 */
1931 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1932 			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
1933 
1934 		loff += sizeof(u32);
1935 
1936 		/*
1937 		 * handle page offset wrap around
1938 		 */
1939 		loff = PSS_SMEM_PGOFF(loff);
1940 		if (loff == 0) {
1941 			pgnum++;
1942 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1943 		}
1944 	}
1945 
1946 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1947 			ioc->ioc_regs.host_page_num_fn);
1948 
1949 	/*
1950 	 * Set boot type, env and device mode at the end.
1951 	 */
1952 	if (boot_env == BFI_FWBOOT_ENV_OS &&
1953 		boot_type == BFI_FWBOOT_TYPE_FLASH) {
1954 		boot_type = BFI_FWBOOT_TYPE_NORMAL;
1955 	}
1956 	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1957 				ioc->port0_mode, ioc->port1_mode);
1958 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1959 			swab32(asicmode));
1960 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
1961 			swab32(boot_type));
1962 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
1963 			swab32(boot_env));
1964 	return BFA_STATUS_OK;
1965 }
1966 
1967 
1968 /*
1969  * Update BFA configuration from firmware configuration.
1970  */
1971 static void
1972 bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1973 {
1974 	struct bfi_ioc_attr_s	*attr = ioc->attr;
1975 
1976 	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
1977 	attr->card_type     = be32_to_cpu(attr->card_type);
1978 	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
1979 	ioc->fcmode	= (attr->port_mode == BFI_PORT_MODE_FC);
1980 	attr->mfg_year	= be16_to_cpu(attr->mfg_year);
1981 
1982 	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1983 }
1984 
1985 /*
1986  * Attach time initialization of mbox logic.
1987  */
1988 static void
1989 bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1990 {
1991 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1992 	int	mc;
1993 
1994 	INIT_LIST_HEAD(&mod->cmd_q);
1995 	for (mc = 0; mc < BFI_MC_MAX; mc++) {
1996 		mod->mbhdlr[mc].cbfn = NULL;
1997 		mod->mbhdlr[mc].cbarg = ioc->bfa;
1998 	}
1999 }
2000 
2001 /*
2002  * Mbox poll timer -- restarts any pending mailbox requests.
2003  */
2004 static void
2005 bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
2006 {
2007 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2008 	struct bfa_mbox_cmd_s		*cmd;
2009 	u32			stat;
2010 
2011 	/*
2012 	 * If no command pending, do nothing
2013 	 */
2014 	if (list_empty(&mod->cmd_q))
2015 		return;
2016 
2017 	/*
2018 	 * If previous command is not yet fetched by firmware, do nothing
2019 	 */
2020 	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2021 	if (stat)
2022 		return;
2023 
2024 	/*
2025 	 * Dequeue the pending command and send it to the firmware mailbox.
2026 	 */
2027 	bfa_q_deq(&mod->cmd_q, &cmd);
2028 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2029 }
2030 
2031 /*
2032  * Cleanup any pending requests.
2033  */
2034 static void
2035 bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
2036 {
2037 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2038 	struct bfa_mbox_cmd_s		*cmd;
2039 
2040 	while (!list_empty(&mod->cmd_q))
2041 		bfa_q_deq(&mod->cmd_q, &cmd);
2042 }
2043 
2044 /*
2045  * Read data from SMEM to host through PCI memmap
2046  *
2047  * @param[in]	ioc	IOC instance
2048  * @param[out]	tbuf	host buffer that receives the smem data
2049  * @param[in]	soff	smem offset
2050  * @param[in]	sz	number of bytes to read
2051  */
2052 static bfa_status_t
2053 bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
2054 {
2055 	u32 pgnum, loff;
2056 	__be32 r32;
2057 	int i, len;
2058 	u32 *buf = tbuf;
2059 
2060 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2061 	loff = PSS_SMEM_PGOFF(soff);
2062 	bfa_trc(ioc, pgnum);
2063 	bfa_trc(ioc, loff);
2064 	bfa_trc(ioc, sz);
2065 
2066 	/*
2067 	 *  Hold semaphore to serialize pll init and fwtrc.
2068 	 */
2069 	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2070 		bfa_trc(ioc, 0);
2071 		return BFA_STATUS_FAILED;
2072 	}
2073 
2074 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2075 
2076 	len = sz/sizeof(u32);
2077 	bfa_trc(ioc, len);
2078 	for (i = 0; i < len; i++) {
2079 		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
2080 		buf[i] = swab32(r32);
2081 		loff += sizeof(u32);
2082 
2083 		/*
2084 		 * handle page offset wrap around
2085 		 */
2086 		loff = PSS_SMEM_PGOFF(loff);
2087 		if (loff == 0) {
2088 			pgnum++;
2089 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2090 		}
2091 	}
2092 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2093 			ioc->ioc_regs.host_page_num_fn);
2094 	/*
2095 	 *  release semaphore.
2096 	 */
2097 	readl(ioc->ioc_regs.ioc_init_sem_reg);
2098 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2099 
2100 	bfa_trc(ioc, pgnum);
2101 	return BFA_STATUS_OK;
2102 }
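
/*
 * Worked example (editorial sketch, not in the original source):
 * assuming the 32KB page encoding of PSS_SMEM_PGNUM()/PSS_SMEM_PGOFF()
 * from bfi_reg.h, an smem offset of 0x9004 maps as
 *
 *	pgnum = smem_pg0 + (0x9004 >> 15) = smem_pg0 + 1
 *	loff  = 0x9004 & 0x7fff           = 0x1004
 *
 * Each 4-byte access then advances loff, and when loff wraps to 0 the
 * next 32KB page is selected through host_page_num_fn -- the same
 * scheme used by the download, read and clear loops in this file.
 */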
2103 
2104 /*
2105  * Clear SMEM data from host through PCI memmap
2106  *
2107  * @param[in]	ioc	IOC instance
2108  * @param[in]	soff	smem offset
2109  * @param[in]	sz	number of bytes to clear
2110  */
2111 static bfa_status_t
2112 bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
2113 {
2114 	int i, len;
2115 	u32 pgnum, loff;
2116 
2117 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2118 	loff = PSS_SMEM_PGOFF(soff);
2119 	bfa_trc(ioc, pgnum);
2120 	bfa_trc(ioc, loff);
2121 	bfa_trc(ioc, sz);
2122 
2123 	/*
2124 	 *  Hold semaphore to serialize pll init and fwtrc.
2125 	 */
2126 	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2127 		bfa_trc(ioc, 0);
2128 		return BFA_STATUS_FAILED;
2129 	}
2130 
2131 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2132 
2133 	len = sz/sizeof(u32); /* len in words */
2134 	bfa_trc(ioc, len);
2135 	for (i = 0; i < len; i++) {
2136 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
2137 		loff += sizeof(u32);
2138 
2139 		/*
2140 		 * handle page offset wrap around
2141 		 */
2142 		loff = PSS_SMEM_PGOFF(loff);
2143 		if (loff == 0) {
2144 			pgnum++;
2145 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2146 		}
2147 	}
2148 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2149 			ioc->ioc_regs.host_page_num_fn);
2150 
2151 	/*
2152 	 *  release semaphore.
2153 	 */
2154 	readl(ioc->ioc_regs.ioc_init_sem_reg);
2155 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2156 	bfa_trc(ioc, pgnum);
2157 	return BFA_STATUS_OK;
2158 }
2159 
2160 static void
2161 bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
2162 {
2163 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2164 
2165 	/*
2166 	 * Notify driver and common modules registered for notification.
2167 	 */
2168 	ioc->cbfn->hbfail_cbfn(ioc->bfa);
2169 	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
2170 
2171 	bfa_ioc_debug_save_ftrc(ioc);
2172 
2173 	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
2174 		"Heart Beat of IOC has failed\n");
2175 	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
2176 
2177 }
2178 
2179 static void
2180 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
2181 {
2182 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2183 	/*
2184 	 * Provide enable completion callback.
2185 	 */
2186 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
2187 	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
2188 		"Running firmware version is incompatible "
2189 		"with the driver version\n");
2190 	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
2191 }
2192 
2193 bfa_status_t
2194 bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
2195 {
2196 
2197 	/*
2198 	 *  Hold semaphore so that nobody can access the chip during init.
2199 	 */
2200 	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
2201 
2202 	bfa_ioc_pll_init_asic(ioc);
2203 
2204 	ioc->pllinit = BFA_TRUE;
2205 
2206 	/*
2207 	 * Initialize LMEM
2208 	 */
2209 	bfa_ioc_lmem_init(ioc);
2210 
2211 	/*
2212 	 *  release semaphore.
2213 	 */
2214 	readl(ioc->ioc_regs.ioc_init_sem_reg);
2215 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2216 
2217 	return BFA_STATUS_OK;
2218 }
2219 
2220 /*
2221  * Boot the firmware. Also used by the diag module to do a firmware
2222  * boot with memory test as the entry vector.
2223  */
2224 bfa_status_t
2225 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
2226 {
2227 	struct bfi_ioc_image_hdr_s *drv_fwhdr;
2228 	bfa_status_t status;
2229 	bfa_ioc_stats(ioc, ioc_boots);
2230 
2231 	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2232 		return BFA_STATUS_FAILED;
2233 
2234 	if (boot_env == BFI_FWBOOT_ENV_OS &&
2235 		boot_type == BFI_FWBOOT_TYPE_NORMAL) {
2236 
2237 		drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
2238 			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
2239 
2240 		/*
2241 		 * Boot from flash only if the flash f/w is better than
2242 		 * the driver f/w. Otherwise push the driver's firmware.
2243 		 */
2244 		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
2245 						BFI_IOC_IMG_VER_BETTER)
2246 			boot_type = BFI_FWBOOT_TYPE_FLASH;
2247 	}
2248 
2249 	/*
2250 	 * Initialize IOC state of all functions on a chip reset.
2251 	 */
2252 	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2253 		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2254 		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2255 	} else {
2256 		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
2257 		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
2258 	}
2259 
2260 	bfa_ioc_msgflush(ioc);
2261 	status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
2262 	if (status == BFA_STATUS_OK)
2263 		bfa_ioc_lpu_start(ioc);
2264 	else {
2265 		WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST);
2266 		bfa_iocpf_timeout(ioc);
2267 	}
2268 	return status;
2269 }
2270 
2271 /*
2272  * Enable/disable IOC failure auto recovery.
2273  */
2274 void
2275 bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2276 {
2277 	bfa_auto_recover = auto_recover;
2278 }
2279 
2280 
2281 
2282 bfa_boolean_t
2283 bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2284 {
2285 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2286 }
2287 
2288 bfa_boolean_t
2289 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2290 {
2291 	u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc);
2292 
2293 	return ((r32 != BFI_IOC_UNINIT) &&
2294 		(r32 != BFI_IOC_INITING) &&
2295 		(r32 != BFI_IOC_MEMTEST));
2296 }
2297 
2298 bfa_boolean_t
2299 bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2300 {
2301 	__be32	*msgp = mbmsg;
2302 	u32	r32;
2303 	int		i;
2304 
2305 	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2306 	if ((r32 & 1) == 0)
2307 		return BFA_FALSE;
2308 
2309 	/*
2310 	 * read the MBOX msg
2311 	 */
2312 	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2313 	     i++) {
2314 		r32 = readl(ioc->ioc_regs.lpu_mbox +
2315 				   i * sizeof(u32));
2316 		msgp[i] = cpu_to_be32(r32);
2317 	}
2318 
2319 	/*
2320 	 * turn off mailbox interrupt by clearing mailbox status
2321 	 */
2322 	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2323 	readl(ioc->ioc_regs.lpu_mbox_cmd);
2324 
2325 	return BFA_TRUE;
2326 }
2327 
2328 void
2329 bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2330 {
2331 	union bfi_ioc_i2h_msg_u	*msg;
2332 	struct bfa_iocpf_s *iocpf = &ioc->iocpf;
2333 
2334 	msg = (union bfi_ioc_i2h_msg_u *) m;
2335 
2336 	bfa_ioc_stats(ioc, ioc_isrs);
2337 
2338 	switch (msg->mh.msg_id) {
2339 	case BFI_IOC_I2H_HBEAT:
2340 		break;
2341 
2342 	case BFI_IOC_I2H_ENABLE_REPLY:
2343 		ioc->port_mode = ioc->port_mode_cfg =
2344 				(enum bfa_mode_s)msg->fw_event.port_mode;
2345 		ioc->ad_cap_bm = msg->fw_event.cap_bm;
2346 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2347 		break;
2348 
2349 	case BFI_IOC_I2H_DISABLE_REPLY:
2350 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2351 		break;
2352 
2353 	case BFI_IOC_I2H_GETATTR_REPLY:
2354 		bfa_ioc_getattr_reply(ioc);
2355 		break;
2356 
2357 	default:
2358 		bfa_trc(ioc, msg->mh.msg_id);
2359 		WARN_ON(1);
2360 	}
2361 }
2362 
2363 /*
2364  * IOC attach time initialization and setup.
2365  *
2366  * @param[in]	ioc	memory for IOC
2367  * @param[in]	bfa	driver instance structure
2368  */
2369 void
2370 bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2371 	       struct bfa_timer_mod_s *timer_mod)
2372 {
2373 	ioc->bfa	= bfa;
2374 	ioc->cbfn	= cbfn;
2375 	ioc->timer_mod	= timer_mod;
2376 	ioc->fcmode	= BFA_FALSE;
2377 	ioc->pllinit	= BFA_FALSE;
2378 	ioc->dbg_fwsave_once = BFA_TRUE;
2379 	ioc->iocpf.ioc	= ioc;
2380 
2381 	bfa_ioc_mbox_attach(ioc);
2382 	INIT_LIST_HEAD(&ioc->notify_q);
2383 
2384 	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2385 	bfa_fsm_send_event(ioc, IOC_E_RESET);
2386 }
2387 
2388 /*
2389  * Driver detach time IOC cleanup.
2390  */
2391 void
2392 bfa_ioc_detach(struct bfa_ioc_s *ioc)
2393 {
2394 	bfa_fsm_send_event(ioc, IOC_E_DETACH);
2395 	INIT_LIST_HEAD(&ioc->notify_q);
2396 }
2397 
2398 /*
2399  * Setup IOC PCI properties.
2400  *
2401  * @param[in]	pcidev	PCI device information for this IOC
2402  */
2403 void
2404 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2405 		enum bfi_pcifn_class clscode)
2406 {
2407 	ioc->clscode	= clscode;
2408 	ioc->pcidev	= *pcidev;
2409 
2410 	/*
2411 	 * Initialize IOC and device personality
2412 	 */
2413 	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2414 	ioc->asic_mode  = BFI_ASIC_MODE_FC;
2415 
2416 	switch (pcidev->device_id) {
2417 	case BFA_PCI_DEVICE_ID_FC_8G1P:
2418 	case BFA_PCI_DEVICE_ID_FC_8G2P:
2419 		ioc->asic_gen = BFI_ASIC_GEN_CB;
2420 		ioc->fcmode = BFA_TRUE;
2421 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2422 		ioc->ad_cap_bm = BFA_CM_HBA;
2423 		break;
2424 
2425 	case BFA_PCI_DEVICE_ID_CT:
2426 		ioc->asic_gen = BFI_ASIC_GEN_CT;
2427 		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2428 		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2429 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2430 		ioc->ad_cap_bm = BFA_CM_CNA;
2431 		break;
2432 
2433 	case BFA_PCI_DEVICE_ID_CT_FC:
2434 		ioc->asic_gen = BFI_ASIC_GEN_CT;
2435 		ioc->fcmode = BFA_TRUE;
2436 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2437 		ioc->ad_cap_bm = BFA_CM_HBA;
2438 		break;
2439 
2440 	case BFA_PCI_DEVICE_ID_CT2:
2441 	case BFA_PCI_DEVICE_ID_CT2_QUAD:
2442 		ioc->asic_gen = BFI_ASIC_GEN_CT2;
2443 		if (clscode == BFI_PCIFN_CLASS_FC &&
2444 		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2445 			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2446 			ioc->fcmode = BFA_TRUE;
2447 			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2448 			ioc->ad_cap_bm = BFA_CM_HBA;
2449 		} else {
2450 			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2451 			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2452 			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2453 				ioc->port_mode =
2454 				ioc->port_mode_cfg = BFA_MODE_CNA;
2455 				ioc->ad_cap_bm = BFA_CM_CNA;
2456 			} else {
2457 				ioc->port_mode =
2458 				ioc->port_mode_cfg = BFA_MODE_NIC;
2459 				ioc->ad_cap_bm = BFA_CM_NIC;
2460 			}
2461 		}
2462 		break;
2463 
2464 	default:
2465 		WARN_ON(1);
2466 	}
2467 
2468 	/*
2469 	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2470 	 */
2471 	if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2472 		bfa_ioc_set_cb_hwif(ioc);
2473 	else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2474 		bfa_ioc_set_ct_hwif(ioc);
2475 	else {
2476 		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2477 		bfa_ioc_set_ct2_hwif(ioc);
2478 		bfa_ioc_ct2_poweron(ioc);
2479 	}
2480 
2481 	bfa_ioc_map_port(ioc);
2482 	bfa_ioc_reg_init(ioc);
2483 }
2484 
2485 /*
2486  * Initialize IOC dma memory
2487  *
2488  * @param[in]	dm_kva	kernel virtual address of IOC dma memory
2489  * @param[in]	dm_pa	physical address of IOC dma memory
2490  */
2491 void
2492 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
2493 {
2494 	/*
2495 	 * dma memory for firmware attribute
2496 	 */
2497 	ioc->attr_dma.kva = dm_kva;
2498 	ioc->attr_dma.pa = dm_pa;
2499 	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2500 }
2501 
2502 void
2503 bfa_ioc_enable(struct bfa_ioc_s *ioc)
2504 {
2505 	bfa_ioc_stats(ioc, ioc_enables);
2506 	ioc->dbg_fwsave_once = BFA_TRUE;
2507 
2508 	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2509 }
2510 
2511 void
2512 bfa_ioc_disable(struct bfa_ioc_s *ioc)
2513 {
2514 	bfa_ioc_stats(ioc, ioc_disables);
2515 	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2516 }
2517 
2518 void
2519 bfa_ioc_suspend(struct bfa_ioc_s *ioc)
2520 {
2521 	ioc->dbg_fwsave_once = BFA_TRUE;
2522 	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2523 }
2524 
2525 /*
2526  * Initialize memory for saving firmware trace. Driver must initialize
2527  * trace memory before calling bfa_ioc_enable().
2528  */
2529 void
2530 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2531 {
2532 	ioc->dbg_fwsave	    = dbg_fwsave;
2533 	ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
2534 }
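
/*
 * Illustrative bring-up sketch (assumed typical order, inferred from
 * the APIs in this file rather than from any single caller; fwtrc_buf
 * is a placeholder buffer of BFA_DBG_FWTRC_LEN bytes):
 *
 *	bfa_ioc_attach(ioc, bfa, &cbfn, timer_mod);
 *	bfa_ioc_pci_init(ioc, pcidev, clscode);
 *	bfa_ioc_mem_claim(ioc, dm_kva, dm_pa);
 *	bfa_ioc_debug_memclaim(ioc, fwtrc_buf);	(before enable)
 *	bfa_ioc_enable(ioc);
 */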
2535 
2536 /*
2537  * Register mailbox message handler functions
2538  *
2539  * @param[in]	ioc		IOC instance
2540  * @param[in]	mcfuncs		message class handler functions
2541  */
2542 void
2543 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2544 {
2545 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2546 	int				mc;
2547 
2548 	for (mc = 0; mc < BFI_MC_MAX; mc++)
2549 		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2550 }
2551 
2552 /*
2553  * Register mailbox message handler function, to be called by common modules
2554  */
2555 void
2556 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2557 		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2558 {
2559 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2560 
2561 	mod->mbhdlr[mc].cbfn	= cbfn;
2562 	mod->mbhdlr[mc].cbarg	= cbarg;
2563 }
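
/*
 * Usage sketch (illustrative): a sub-module registers its message
 * class handler at attach time, exactly as the ablk and sfp modules
 * later in this file do, e.g.:
 *
 *	bfa_ioc_mbox_regisr(ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
 */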
2564 
2565 /*
2566  * Queue a mailbox command request to firmware. If the mailbox is busy,
2567  * the command is queued and sent later. Caller must serialize.
2568  *
2569  * @param[in]	ioc	IOC instance
2570  * @param[in]	cmd	Mailbox command
2571  */
2572 void
2573 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2574 {
2575 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2576 	u32			stat;
2577 
2578 	/*
2579 	 * If a previous command is pending, queue new command
2580 	 */
2581 	if (!list_empty(&mod->cmd_q)) {
2582 		list_add_tail(&cmd->qe, &mod->cmd_q);
2583 		return;
2584 	}
2585 
2586 	/*
2587 	 * If mailbox is busy, queue command for poll timer
2588 	 */
2589 	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2590 	if (stat) {
2591 		list_add_tail(&cmd->qe, &mod->cmd_q);
2592 		return;
2593 	}
2594 
2595 	/*
2596 	 * mailbox is free -- queue command to firmware
2597 	 */
2598 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2599 }
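
/*
 * Usage sketch (illustrative, abbreviated from bfa_ioc_send_fwsync()
 * below): build a host-to-IOC request in the command's message buffer,
 * then queue it:
 *
 *	struct bfa_mbox_cmd_s cmd;
 *	struct bfi_ioc_ctrl_req_s *req =
 *			(struct bfi_ioc_ctrl_req_s *) cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, &cmd);
 */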
2600 
2601 /*
2602  * Handle mailbox interrupts
2603  */
2604 void
2605 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2606 {
2607 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2608 	struct bfi_mbmsg_s		m;
2609 	int				mc;
2610 
2611 	if (bfa_ioc_msgget(ioc, &m)) {
2612 		/*
2613 		 * Treat IOC message class as special.
2614 		 */
2615 		mc = m.mh.msg_class;
2616 		if (mc == BFI_MC_IOC) {
2617 			bfa_ioc_isr(ioc, &m);
2618 			return;
2619 		}
2620 
2621 		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2622 			return;
2623 
2624 		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2625 	}
2626 
2627 	bfa_ioc_lpu_read_stat(ioc);
2628 
2629 	/*
2630 	 * Try to send pending mailbox commands
2631 	 */
2632 	bfa_ioc_mbox_poll(ioc);
2633 }
2634 
2635 void
2636 bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2637 {
2638 	bfa_ioc_stats(ioc, ioc_hbfails);
2639 	ioc->stats.hb_count = ioc->hb_count;
2640 	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2641 }
2642 
2643 /*
2644  * return true if IOC is disabled
2645  */
2646 bfa_boolean_t
2647 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2648 {
2649 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2650 		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2651 }
2652 
2653 /*
2654  * return true if IOC firmware is different.
2655  */
2656 bfa_boolean_t
2657 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2658 {
2659 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2660 		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2661 		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2662 }
2663 
2664 /*
2665  * Check if adapter is disabled -- both IOCs should be in a disabled
2666  * state.
2667  */
2668 bfa_boolean_t
2669 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2670 {
2671 	u32	ioc_state;
2672 
2673 	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2674 		return BFA_FALSE;
2675 
2676 	ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
2677 	if (!bfa_ioc_state_disabled(ioc_state))
2678 		return BFA_FALSE;
2679 
2680 	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2681 		ioc_state = bfa_ioc_get_alt_ioc_fwstate(ioc);
2682 		if (!bfa_ioc_state_disabled(ioc_state))
2683 			return BFA_FALSE;
2684 	}
2685 
2686 	return BFA_TRUE;
2687 }
2688 
2689 /*
2690  * Reset IOC fwstate registers.
2691  */
2692 void
2693 bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2694 {
2695 	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2696 	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2697 }
2698 
2699 #define BFA_MFG_NAME "QLogic"
2700 void
2701 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2702 			 struct bfa_adapter_attr_s *ad_attr)
2703 {
2704 	struct bfi_ioc_attr_s	*ioc_attr;
2705 
2706 	ioc_attr = ioc->attr;
2707 
2708 	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2709 	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2710 	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2711 	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2712 	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2713 		      sizeof(struct bfa_mfg_vpd_s));
2714 
2715 	ad_attr->nports = bfa_ioc_get_nports(ioc);
2716 	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2717 
2718 	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2719 	/* For now, model descr uses same model string */
2720 	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2721 
2722 	ad_attr->card_type = ioc_attr->card_type;
2723 	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2724 
2725 	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2726 		ad_attr->prototype = 1;
2727 	else
2728 		ad_attr->prototype = 0;
2729 
2730 	ad_attr->pwwn = ioc->attr->pwwn;
2731 	ad_attr->mac  = bfa_ioc_get_mac(ioc);
2732 
2733 	ad_attr->pcie_gen = ioc_attr->pcie_gen;
2734 	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2735 	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2736 	ad_attr->asic_rev = ioc_attr->asic_rev;
2737 
2738 	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2739 
2740 	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2741 	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2742 				  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2743 	ad_attr->mfg_day = ioc_attr->mfg_day;
2744 	ad_attr->mfg_month = ioc_attr->mfg_month;
2745 	ad_attr->mfg_year = ioc_attr->mfg_year;
2746 	memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN);
2747 }
2748 
2749 enum bfa_ioc_type_e
2750 bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2751 {
2752 	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2753 		return BFA_IOC_TYPE_LL;
2754 
2755 	WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2756 
2757 	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2758 		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2759 }
2760 
2761 void
2762 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2763 {
2764 	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2765 	memcpy((void *)serial_num,
2766 			(void *)ioc->attr->brcd_serialnum,
2767 			BFA_ADAPTER_SERIAL_NUM_LEN);
2768 }
2769 
2770 void
2771 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2772 {
2773 	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2774 	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2775 }
2776 
2777 void
2778 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2779 {
2780 	WARN_ON(!chip_rev);
2781 
2782 	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2783 
2784 	chip_rev[0] = 'R';
2785 	chip_rev[1] = 'e';
2786 	chip_rev[2] = 'v';
2787 	chip_rev[3] = '-';
2788 	chip_rev[4] = ioc->attr->asic_rev;
2789 	chip_rev[5] = '\0';
2790 }
2791 
2792 void
2793 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2794 {
2795 	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2796 	memcpy(optrom_ver, ioc->attr->optrom_version,
2797 		      BFA_VERSION_LEN);
2798 }
2799 
2800 void
2801 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2802 {
2803 	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2804 	strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2805 }
2806 
2807 void
2808 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2809 {
2810 	struct bfi_ioc_attr_s	*ioc_attr;
2811 	u8 nports = bfa_ioc_get_nports(ioc);
2812 
2813 	WARN_ON(!model);
2814 	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2815 
2816 	ioc_attr = ioc->attr;
2817 
2818 	if (bfa_asic_id_ct2(ioc->pcidev.device_id) &&
2819 		(!bfa_mfg_is_mezz(ioc_attr->card_type)))
2820 		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s",
2821 			BFA_MFG_NAME, ioc_attr->card_type, nports, "p");
2822 	else
2823 		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2824 			BFA_MFG_NAME, ioc_attr->card_type);
2825 }
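
/*
 * Example output (hypothetical card values): a non-mezz CT2 card of
 * type 1860 with two ports formats as "QLogic-1860-2p"; all other
 * cards use the short form, e.g. "QLogic-1860".
 */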
2826 
2827 enum bfa_ioc_state
2828 bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2829 {
2830 	enum bfa_iocpf_state iocpf_st;
2831 	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2832 
2833 	if (ioc_st == BFA_IOC_ENABLING ||
2834 		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2835 
2836 		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2837 
2838 		switch (iocpf_st) {
2839 		case BFA_IOCPF_SEMWAIT:
2840 			ioc_st = BFA_IOC_SEMWAIT;
2841 			break;
2842 
2843 		case BFA_IOCPF_HWINIT:
2844 			ioc_st = BFA_IOC_HWINIT;
2845 			break;
2846 
2847 		case BFA_IOCPF_FWMISMATCH:
2848 			ioc_st = BFA_IOC_FWMISMATCH;
2849 			break;
2850 
2851 		case BFA_IOCPF_FAIL:
2852 			ioc_st = BFA_IOC_FAIL;
2853 			break;
2854 
2855 		case BFA_IOCPF_INITFAIL:
2856 			ioc_st = BFA_IOC_INITFAIL;
2857 			break;
2858 
2859 		default:
2860 			break;
2861 		}
2862 	}
2863 
2864 	return ioc_st;
2865 }
2866 
2867 void
2868 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2869 {
2870 	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2871 
2872 	ioc_attr->state = bfa_ioc_get_state(ioc);
2873 	ioc_attr->port_id = bfa_ioc_portid(ioc);
2874 	ioc_attr->port_mode = ioc->port_mode;
2875 	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2876 	ioc_attr->cap_bm = ioc->ad_cap_bm;
2877 
2878 	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2879 
2880 	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2881 
2882 	ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2883 	ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2884 	ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc));
2885 	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2886 }
2887 
2888 mac_t
2889 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2890 {
2891 	/*
2892 	 * Check the IOC type and return the appropriate MAC
2893 	 */
2894 	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2895 		return ioc->attr->fcoe_mac;
2896 	else
2897 		return ioc->attr->mac;
2898 }
2899 
2900 mac_t
2901 bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2902 {
2903 	mac_t	m;
2904 
2905 	m = ioc->attr->mfg_mac;
2906 	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2907 		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2908 	else
2909 		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2910 			bfa_ioc_pcifn(ioc));
2911 
2912 	return m;
2913 }
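
/*
 * Worked example (hypothetical addresses): on an old WWN/MAC model
 * card with mfg MAC 00:05:1e:00:00:10, PCI function 2 yields
 * 00:05:1e:00:00:12 by bumping the last byte; newer models instead
 * derive the address from the low three bytes via
 * bfa_mfg_increment_wwn_mac().
 */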
2914 
2915 /*
2916  * Send AEN notification
2917  */
2918 void
2919 bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2920 {
2921 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2922 	struct bfa_aen_entry_s	*aen_entry;
2923 	enum bfa_ioc_type_e ioc_type;
2924 
2925 	bfad_get_aen_entry(bfad, aen_entry);
2926 	if (!aen_entry)
2927 		return;
2928 
2929 	ioc_type = bfa_ioc_get_type(ioc);
2930 	switch (ioc_type) {
2931 	case BFA_IOC_TYPE_FC:
2932 		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2933 		break;
2934 	case BFA_IOC_TYPE_FCoE:
2935 		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2936 		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2937 		break;
2938 	case BFA_IOC_TYPE_LL:
2939 		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2940 		break;
2941 	default:
2942 		WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2943 		break;
2944 	}
2945 
2946 	/* Send the AEN notification */
2947 	aen_entry->aen_data.ioc.ioc_type = ioc_type;
2948 	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2949 				  BFA_AEN_CAT_IOC, event);
2950 }
2951 
2952 /*
2953  * Retrieve saved firmware trace from a prior IOC failure.
2954  */
2955 bfa_status_t
2956 bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2957 {
2958 	int	tlen;
2959 
2960 	if (ioc->dbg_fwsave_len == 0)
2961 		return BFA_STATUS_ENOFSAVE;
2962 
2963 	tlen = *trclen;
2964 	if (tlen > ioc->dbg_fwsave_len)
2965 		tlen = ioc->dbg_fwsave_len;
2966 
2967 	memcpy(trcdata, ioc->dbg_fwsave, tlen);
2968 	*trclen = tlen;
2969 	return BFA_STATUS_OK;
2970 }
2971 
2972 
2973 /*
2974  * Retrieve the currently running firmware trace, read directly from smem.
2975  */
2976 bfa_status_t
2977 bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2978 {
2979 	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2980 	int tlen;
2981 	bfa_status_t status;
2982 
2983 	bfa_trc(ioc, *trclen);
2984 
2985 	tlen = *trclen;
2986 	if (tlen > BFA_DBG_FWTRC_LEN)
2987 		tlen = BFA_DBG_FWTRC_LEN;
2988 
2989 	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2990 	*trclen = tlen;
2991 	return status;
2992 }
2993 
2994 static void
2995 bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2996 {
2997 	struct bfa_mbox_cmd_s cmd;
2998 	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2999 
3000 	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
3001 		    bfa_ioc_portid(ioc));
3002 	req->clscode = cpu_to_be16(ioc->clscode);
3003 	bfa_ioc_mbox_queue(ioc, &cmd);
3004 }
3005 
3006 static void
3007 bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
3008 {
3009 	u32 fwsync_iter = 1000;
3010 
3011 	bfa_ioc_send_fwsync(ioc);
3012 
3013 	/*
3014 	 * After sending a fw sync mbox command wait for it to
3015 	 * take effect.  We will not wait for a response because
3016 	 *    1. fw_sync mbox cmd doesn't have a response.
3017 	 *    2. Even if we implement that,  interrupts might not
3018 	 *	 be enabled when we call this function.
3019 	 * So, just keep checking if any mbox cmd is pending, and
3020 	 * after waiting for a reasonable amount of time, go ahead.
3021 	 * It is possible that fw has crashed and the mbox command
3022 	 * is never acknowledged.
3023 	 */
3024 	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
3025 		fwsync_iter--;
3026 }
3027 
3028 /*
3029  * Dump firmware smem
3030  */
3031 bfa_status_t
3032 bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
3033 				u32 *offset, int *buflen)
3034 {
3035 	u32 loff;
3036 	int dlen;
3037 	bfa_status_t status;
3038 	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
3039 
3040 	if (*offset >= smem_len) {
3041 		*offset = *buflen = 0;
3042 		return BFA_STATUS_EINVAL;
3043 	}
3044 
3045 	loff = *offset;
3046 	dlen = *buflen;
3047 
3048 	/*
3049 	 * First smem read, sync smem before proceeding
3050 	 * No need to sync before reading every chunk.
3051 	 */
3052 	if (loff == 0)
3053 		bfa_ioc_fwsync(ioc);
3054 
3055 	if ((loff + dlen) >= smem_len)
3056 		dlen = smem_len - loff;
3057 
3058 	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
3059 
3060 	if (status != BFA_STATUS_OK) {
3061 		*offset = *buflen = 0;
3062 		return status;
3063 	}
3064 
3065 	*offset += dlen;
3066 
3067 	if (*offset >= smem_len)
3068 		*offset = 0;
3069 
3070 	*buflen = dlen;
3071 
3072 	return status;
3073 }
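
/*
 * Usage sketch (illustrative; chunk_buf is a placeholder): callers
 * dump the firmware core in chunks, feeding the returned offset back
 * in until it wraps to zero:
 *
 *	u32 off = 0;
 *	int len;
 *
 *	do {
 *		len = sizeof(chunk_buf);
 *		if (bfa_ioc_debug_fwcore(ioc, chunk_buf, &off, &len) !=
 *		    BFA_STATUS_OK)
 *			break;
 *		... consume len bytes of chunk_buf ...
 *	} while (off != 0);
 */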
3074 
3075 /*
3076  * Firmware statistics
3077  */
3078 bfa_status_t
3079 bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
3080 {
3081 	u32 loff = BFI_IOC_FWSTATS_OFF + \
3082 		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3083 	int tlen;
3084 	bfa_status_t status;
3085 
3086 	if (ioc->stats_busy) {
3087 		bfa_trc(ioc, ioc->stats_busy);
3088 		return BFA_STATUS_DEVBUSY;
3089 	}
3090 	ioc->stats_busy = BFA_TRUE;
3091 
3092 	tlen = sizeof(struct bfa_fw_stats_s);
3093 	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
3094 
3095 	ioc->stats_busy = BFA_FALSE;
3096 	return status;
3097 }
3098 
3099 bfa_status_t
3100 bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
3101 {
3102 	u32 loff = BFI_IOC_FWSTATS_OFF + \
3103 		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3104 	int tlen;
3105 	bfa_status_t status;
3106 
3107 	if (ioc->stats_busy) {
3108 		bfa_trc(ioc, ioc->stats_busy);
3109 		return BFA_STATUS_DEVBUSY;
3110 	}
3111 	ioc->stats_busy = BFA_TRUE;
3112 
3113 	tlen = sizeof(struct bfa_fw_stats_s);
3114 	status = bfa_ioc_smem_clr(ioc, loff, tlen);
3115 
3116 	ioc->stats_busy = BFA_FALSE;
3117 	return status;
3118 }
3119 
3120 /*
3121  * Save firmware trace if configured.
3122  */
3123 void
3124 bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
3125 {
3126 	int		tlen;
3127 
3128 	if (ioc->dbg_fwsave_once) {
3129 		ioc->dbg_fwsave_once = BFA_FALSE;
3130 		if (ioc->dbg_fwsave_len) {
3131 			tlen = ioc->dbg_fwsave_len;
3132 			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
3133 		}
3134 	}
3135 }
3136 
3137 /*
3138  * Firmware failure detected. Start recovery actions.
3139  */
3140 static void
3141 bfa_ioc_recover(struct bfa_ioc_s *ioc)
3142 {
3143 	bfa_ioc_stats(ioc, ioc_hbfails);
3144 	ioc->stats.hb_count = ioc->hb_count;
3145 	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
3146 }
3147 
3148 /*
3149  *  BFA IOC PF private functions
3150  */
3151 static void
3152 bfa_iocpf_timeout(void *ioc_arg)
3153 {
3154 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3155 
3156 	bfa_trc(ioc, 0);
3157 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
3158 }
3159 
3160 static void
3161 bfa_iocpf_sem_timeout(void *ioc_arg)
3162 {
3163 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3164 
3165 	bfa_ioc_hw_sem_get(ioc);
3166 }
3167 
3168 static void
3169 bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
3170 {
3171 	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
3172 
3173 	bfa_trc(ioc, fwstate);
3174 
3175 	if (fwstate == BFI_IOC_DISABLED) {
3176 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
3177 		return;
3178 	}
3179 
3180 	if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
3181 		bfa_iocpf_timeout(ioc);
3182 	else {
3183 		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3184 		bfa_iocpf_poll_timer_start(ioc);
3185 	}
3186 }
3187 
3188 static void
3189 bfa_iocpf_poll_timeout(void *ioc_arg)
3190 {
3191 	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3192 
3193 	bfa_ioc_poll_fwinit(ioc);
3194 }
3195 
3196 /*
3197  *  BFA timer functions
3198  */
3199 void
3200 bfa_timer_beat(struct bfa_timer_mod_s *mod)
3201 {
3202 	struct list_head *qh = &mod->timer_q;
3203 	struct list_head *qe, *qe_next;
3204 	struct bfa_timer_s *elem;
3205 	struct list_head timedout_q;
3206 
3207 	INIT_LIST_HEAD(&timedout_q);
3208 
3209 	qe = bfa_q_next(qh);
3210 
3211 	while (qe != qh) {
3212 		qe_next = bfa_q_next(qe);
3213 
3214 		elem = (struct bfa_timer_s *) qe;
3215 		if (elem->timeout <= BFA_TIMER_FREQ) {
3216 			elem->timeout = 0;
3217 			list_del(&elem->qe);
3218 			list_add_tail(&elem->qe, &timedout_q);
3219 		} else {
3220 			elem->timeout -= BFA_TIMER_FREQ;
3221 		}
3222 
3223 		qe = qe_next;	/* go to next elem */
3224 	}
3225 
3226 	/*
3227 	 * Pop all the timeout entries
3228 	 */
3229 	while (!list_empty(&timedout_q)) {
3230 		bfa_q_deq(&timedout_q, &elem);
3231 		elem->timercb(elem->arg);
3232 	}
3233 }
3234 
3235 /*
3236  * Should be called with lock protection
3237  */
3238 void
3239 bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
3240 		    void (*timercb) (void *), void *arg, unsigned int timeout)
3241 {
3242 
3243 	WARN_ON(timercb == NULL);
3244 	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
3245 
3246 	timer->timeout = timeout;
3247 	timer->timercb = timercb;
3248 	timer->arg = arg;
3249 
3250 	list_add_tail(&timer->qe, &mod->timer_q);
3251 }
3252 
3253 /*
3254  * Should be called with lock protection
3255  */
3256 void
3257 bfa_timer_stop(struct bfa_timer_s *timer)
3258 {
3259 	WARN_ON(list_empty(&timer->qe));
3260 
3261 	list_del(&timer->qe);
3262 }
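
/*
 * Usage sketch (illustrative; my_timeout_cb and arg are placeholders):
 * a timer is armed with a callback and fires once bfa_timer_beat()
 * has been called enough times from the driver's periodic tick, all
 * under the same lock:
 *
 *	bfa_timer_begin(mod, &tmr, my_timeout_cb, arg, 2 * BFA_TIMER_FREQ);
 *	...
 *	bfa_timer_beat(mod);	(invoked every BFA_TIMER_FREQ msecs)
 */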
3263 
3264 /*
3265  *	ASIC block related
3266  */
3267 static void
3268 bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3269 {
3270 	struct bfa_ablk_cfg_inst_s *cfg_inst;
3271 	int i, j;
3272 	u16	be16;
3273 
3274 	for (i = 0; i < BFA_ABLK_MAX; i++) {
3275 		cfg_inst = &cfg->inst[i];
3276 		for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3277 			be16 = cfg_inst->pf_cfg[j].pers;
3278 			cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3279 			be16 = cfg_inst->pf_cfg[j].num_qpairs;
3280 			cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3281 			be16 = cfg_inst->pf_cfg[j].num_vectors;
3282 			cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3283 			be16 = cfg_inst->pf_cfg[j].bw_min;
3284 			cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
3285 			be16 = cfg_inst->pf_cfg[j].bw_max;
3286 			cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
3287 		}
3288 	}
3289 }
3290 
3291 static void
3292 bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3293 {
3294 	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3295 	struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3296 	bfa_ablk_cbfn_t cbfn;
3297 
3298 	WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3299 	bfa_trc(ablk->ioc, msg->mh.msg_id);
3300 
3301 	switch (msg->mh.msg_id) {
3302 	case BFI_ABLK_I2H_QUERY:
3303 		if (rsp->status == BFA_STATUS_OK) {
3304 			memcpy(ablk->cfg, ablk->dma_addr.kva,
3305 				sizeof(struct bfa_ablk_cfg_s));
3306 			bfa_ablk_config_swap(ablk->cfg);
3307 			ablk->cfg = NULL;
3308 		}
3309 		break;
3310 
3311 	case BFI_ABLK_I2H_ADPT_CONFIG:
3312 	case BFI_ABLK_I2H_PORT_CONFIG:
3313 		/* update config port mode */
3314 		ablk->ioc->port_mode_cfg = rsp->port_mode;
3315 		/* fall through */
3316 	case BFI_ABLK_I2H_PF_DELETE:
3317 	case BFI_ABLK_I2H_PF_UPDATE:
3318 	case BFI_ABLK_I2H_OPTROM_ENABLE:
3319 	case BFI_ABLK_I2H_OPTROM_DISABLE:
3320 		/* No-op */
3321 		break;
3322 
3323 	case BFI_ABLK_I2H_PF_CREATE:
3324 		*(ablk->pcifn) = rsp->pcifn;
3325 		ablk->pcifn = NULL;
3326 		break;
3327 
3328 	default:
3329 		WARN_ON(1);
3330 	}
3331 
3332 	ablk->busy = BFA_FALSE;
3333 	if (ablk->cbfn) {
3334 		cbfn = ablk->cbfn;
3335 		ablk->cbfn = NULL;
3336 		cbfn(ablk->cbarg, rsp->status);
3337 	}
3338 }
3339 
3340 static void
3341 bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3342 {
3343 	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3344 
3345 	bfa_trc(ablk->ioc, event);
3346 
3347 	switch (event) {
3348 	case BFA_IOC_E_ENABLED:
3349 		WARN_ON(ablk->busy != BFA_FALSE);
3350 		break;
3351 
3352 	case BFA_IOC_E_DISABLED:
3353 	case BFA_IOC_E_FAILED:
3354 		/* Fail any pending requests */
3355 		ablk->pcifn = NULL;
3356 		if (ablk->busy) {
3357 			if (ablk->cbfn)
3358 				ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3359 			ablk->cbfn = NULL;
3360 			ablk->busy = BFA_FALSE;
3361 		}
3362 		break;
3363 
3364 	default:
3365 		WARN_ON(1);
3366 		break;
3367 	}
3368 }
3369 
3370 u32
3371 bfa_ablk_meminfo(void)
3372 {
3373 	return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3374 }
3375 
3376 void
3377 bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3378 {
3379 	ablk->dma_addr.kva = dma_kva;
3380 	ablk->dma_addr.pa  = dma_pa;
3381 }
3382 
3383 void
3384 bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3385 {
3386 	ablk->ioc = ioc;
3387 
3388 	bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3389 	bfa_q_qe_init(&ablk->ioc_notify);
3390 	bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3391 	list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3392 }
3393 
3394 bfa_status_t
3395 bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3396 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3397 {
3398 	struct bfi_ablk_h2i_query_s *m;
3399 
3400 	WARN_ON(!ablk_cfg);
3401 
3402 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3403 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3404 		return BFA_STATUS_IOC_FAILURE;
3405 	}
3406 
3407 	if (ablk->busy) {
3408 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3409 		return  BFA_STATUS_DEVBUSY;
3410 	}
3411 
3412 	ablk->cfg = ablk_cfg;
3413 	ablk->cbfn  = cbfn;
3414 	ablk->cbarg = cbarg;
3415 	ablk->busy  = BFA_TRUE;
3416 
3417 	m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3418 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3419 		    bfa_ioc_portid(ablk->ioc));
3420 	bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3421 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3422 
3423 	return BFA_STATUS_OK;
3424 }
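
/*
 * Usage sketch (illustrative; names are placeholders and the callback
 * signature is assumed from bfa_ablk_cbfn_t): the query is
 * asynchronous and completes through the callback after bfa_ablk_isr()
 * has copied and byte-swapped the DMA'ed configuration:
 *
 *	static void my_query_done(void *cbarg, bfa_status_t status);
 *
 *	bfa_ablk_query(ablk, &my_ablk_cfg, my_query_done, my_arg);
 */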
3425 
3426 bfa_status_t
3427 bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3428 		u8 port, enum bfi_pcifn_class personality,
3429 		u16 bw_min, u16 bw_max,
3430 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3431 {
3432 	struct bfi_ablk_h2i_pf_req_s *m;
3433 
3434 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3435 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3436 		return BFA_STATUS_IOC_FAILURE;
3437 	}
3438 
3439 	if (ablk->busy) {
3440 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3441 		return  BFA_STATUS_DEVBUSY;
3442 	}
3443 
3444 	ablk->pcifn = pcifn;
3445 	ablk->cbfn = cbfn;
3446 	ablk->cbarg = cbarg;
3447 	ablk->busy  = BFA_TRUE;
3448 
3449 	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3450 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3451 		    bfa_ioc_portid(ablk->ioc));
3452 	m->pers = cpu_to_be16((u16)personality);
3453 	m->bw_min = cpu_to_be16(bw_min);
3454 	m->bw_max = cpu_to_be16(bw_max);
3455 	m->port = port;
3456 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3457 
3458 	return BFA_STATUS_OK;
3459 }
3460 
3461 bfa_status_t
3462 bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3463 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3464 {
3465 	struct bfi_ablk_h2i_pf_req_s *m;
3466 
3467 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3468 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3469 		return BFA_STATUS_IOC_FAILURE;
3470 	}
3471 
3472 	if (ablk->busy) {
3473 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3474 		return  BFA_STATUS_DEVBUSY;
3475 	}
3476 
3477 	ablk->cbfn  = cbfn;
3478 	ablk->cbarg = cbarg;
3479 	ablk->busy  = BFA_TRUE;
3480 
3481 	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3482 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3483 		    bfa_ioc_portid(ablk->ioc));
3484 	m->pcifn = (u8)pcifn;
3485 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3486 
3487 	return BFA_STATUS_OK;
3488 }
3489 
3490 bfa_status_t
3491 bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3492 		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3493 {
3494 	struct bfi_ablk_h2i_cfg_req_s *m;
3495 
3496 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3497 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3498 		return BFA_STATUS_IOC_FAILURE;
3499 	}
3500 
3501 	if (ablk->busy) {
3502 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3503 		return  BFA_STATUS_DEVBUSY;
3504 	}
3505 
3506 	ablk->cbfn  = cbfn;
3507 	ablk->cbarg = cbarg;
3508 	ablk->busy  = BFA_TRUE;
3509 
3510 	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3511 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3512 		    bfa_ioc_portid(ablk->ioc));
3513 	m->mode = (u8)mode;
3514 	m->max_pf = (u8)max_pf;
3515 	m->max_vf = (u8)max_vf;
3516 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3517 
3518 	return BFA_STATUS_OK;
3519 }
3520 
3521 bfa_status_t
3522 bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3523 		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3524 {
3525 	struct bfi_ablk_h2i_cfg_req_s *m;
3526 
3527 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3528 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3529 		return BFA_STATUS_IOC_FAILURE;
3530 	}
3531 
3532 	if (ablk->busy) {
3533 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3534 		return  BFA_STATUS_DEVBUSY;
3535 	}
3536 
3537 	ablk->cbfn  = cbfn;
3538 	ablk->cbarg = cbarg;
3539 	ablk->busy  = BFA_TRUE;
3540 
3541 	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3542 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3543 		bfa_ioc_portid(ablk->ioc));
3544 	m->port = (u8)port;
3545 	m->mode = (u8)mode;
3546 	m->max_pf = (u8)max_pf;
3547 	m->max_vf = (u8)max_vf;
3548 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3549 
3550 	return BFA_STATUS_OK;
3551 }
3552 
3553 bfa_status_t
3554 bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
3555 		   u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
3556 {
3557 	struct bfi_ablk_h2i_pf_req_s *m;
3558 
3559 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3560 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3561 		return BFA_STATUS_IOC_FAILURE;
3562 	}
3563 
3564 	if (ablk->busy) {
3565 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3566 		return  BFA_STATUS_DEVBUSY;
3567 	}
3568 
3569 	ablk->cbfn  = cbfn;
3570 	ablk->cbarg = cbarg;
3571 	ablk->busy  = BFA_TRUE;
3572 
3573 	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3574 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3575 		bfa_ioc_portid(ablk->ioc));
3576 	m->pcifn = (u8)pcifn;
3577 	m->bw_min = cpu_to_be16(bw_min);
3578 	m->bw_max = cpu_to_be16(bw_max);
3579 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3580 
3581 	return BFA_STATUS_OK;
3582 }
3583 
3584 bfa_status_t
3585 bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3586 {
3587 	struct bfi_ablk_h2i_optrom_s *m;
3588 
3589 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3590 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3591 		return BFA_STATUS_IOC_FAILURE;
3592 	}
3593 
3594 	if (ablk->busy) {
3595 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3596 		return  BFA_STATUS_DEVBUSY;
3597 	}
3598 
3599 	ablk->cbfn  = cbfn;
3600 	ablk->cbarg = cbarg;
3601 	ablk->busy  = BFA_TRUE;
3602 
3603 	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3604 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3605 		bfa_ioc_portid(ablk->ioc));
3606 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3607 
3608 	return BFA_STATUS_OK;
3609 }
3610 
3611 bfa_status_t
3612 bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3613 {
3614 	struct bfi_ablk_h2i_optrom_s *m;
3615 
3616 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3617 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3618 		return BFA_STATUS_IOC_FAILURE;
3619 	}
3620 
3621 	if (ablk->busy) {
3622 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3623 		return  BFA_STATUS_DEVBUSY;
3624 	}
3625 
3626 	ablk->cbfn  = cbfn;
3627 	ablk->cbarg = cbarg;
3628 	ablk->busy  = BFA_TRUE;
3629 
3630 	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3631 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3632 		bfa_ioc_portid(ablk->ioc));
3633 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3634 
3635 	return BFA_STATUS_OK;
3636 }
3637 
3638 /*
3639  *	SFP module specific
3640  */
3641 
3642 /* forward declarations */
3643 static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3644 static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3645 static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3646 				enum bfa_port_speed portspeed);
3647 
3648 static void
3649 bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3650 {
3651 	bfa_trc(sfp, sfp->lock);
3652 	if (sfp->cbfn)
3653 		sfp->cbfn(sfp->cbarg, sfp->status);
3654 	sfp->lock = 0;
3655 	sfp->cbfn = NULL;
3656 }
3657 
3658 static void
3659 bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3660 {
3661 	bfa_trc(sfp, sfp->portspeed);
3662 	if (sfp->media) {
3663 		bfa_sfp_media_get(sfp);
3664 		if (sfp->state_query_cbfn)
3665 			sfp->state_query_cbfn(sfp->state_query_cbarg,
3666 					sfp->status);
3667 		sfp->media = NULL;
3668 	}
3669 
3670 	if (sfp->portspeed) {
3671 		sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3672 		if (sfp->state_query_cbfn)
3673 			sfp->state_query_cbfn(sfp->state_query_cbarg,
3674 					sfp->status);
3675 		sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3676 	}
3677 
3678 	sfp->state_query_lock = 0;
3679 	sfp->state_query_cbfn = NULL;
3680 }
3681 
3682 /*
3683  *	IOC event handler.
3684  */
3685 static void
3686 bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3687 {
3688 	struct bfa_sfp_s *sfp = sfp_arg;
3689 
3690 	bfa_trc(sfp, event);
3691 	bfa_trc(sfp, sfp->lock);
3692 	bfa_trc(sfp, sfp->state_query_lock);
3693 
3694 	switch (event) {
3695 	case BFA_IOC_E_DISABLED:
3696 	case BFA_IOC_E_FAILED:
3697 		if (sfp->lock) {
3698 			sfp->status = BFA_STATUS_IOC_FAILURE;
3699 			bfa_cb_sfp_show(sfp);
3700 		}
3701 
3702 		if (sfp->state_query_lock) {
3703 			sfp->status = BFA_STATUS_IOC_FAILURE;
3704 			bfa_cb_sfp_state_query(sfp);
3705 		}
3706 		break;
3707 
3708 	default:
3709 		break;
3710 	}
3711 }
3712 
3713 /*
3714  * SFP's State Change Notification post to AEN
3715  */
3716 static void
3717 bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3718 {
3719 	struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3720 	struct bfa_aen_entry_s  *aen_entry;
3721 	enum bfa_port_aen_event aen_evt = 0;
3722 
3723 	bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3724 		      ((u64)rsp->event));
3725 
3726 	bfad_get_aen_entry(bfad, aen_entry);
3727 	if (!aen_entry)
3728 		return;
3729 
3730 	aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3731 	aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3732 	aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3733 
3734 	switch (rsp->event) {
3735 	case BFA_SFP_SCN_INSERTED:
3736 		aen_evt = BFA_PORT_AEN_SFP_INSERT;
3737 		break;
3738 	case BFA_SFP_SCN_REMOVED:
3739 		aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3740 		break;
3741 	case BFA_SFP_SCN_FAILED:
3742 		aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3743 		break;
3744 	case BFA_SFP_SCN_UNSUPPORT:
3745 		aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3746 		break;
3747 	case BFA_SFP_SCN_POM:
3748 		aen_evt = BFA_PORT_AEN_SFP_POM;
3749 		aen_entry->aen_data.port.level = rsp->pomlvl;
3750 		break;
3751 	default:
3752 		bfa_trc(sfp, rsp->event);
3753 		WARN_ON(1);
3754 	}
3755 
3756 	/* Send the AEN notification */
3757 	bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3758 				  BFA_AEN_CAT_PORT, aen_evt);
3759 }
3760 
3761 /*
3762  *	SFP get data send
3763  */
3764 static void
3765 bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3766 {
3767 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3768 
3769 	bfa_trc(sfp, req->memtype);
3770 
3771 	/* build host command */
3772 	bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3773 			bfa_ioc_portid(sfp->ioc));
3774 
3775 	/* send mbox cmd */
3776 	bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3777 }
3778 
3779 /*
3780  *	SFP is valid, read sfp data
3781  */
3782 static void
3783 bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3784 {
3785 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3786 
3787 	WARN_ON(sfp->lock != 0);
3788 	bfa_trc(sfp, sfp->state);
3789 
3790 	sfp->lock = 1;
3791 	sfp->memtype = memtype;
3792 	req->memtype = memtype;
3793 
3794 	/* Setup SG list */
3795 	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3796 
3797 	bfa_sfp_getdata_send(sfp);
3798 }
3799 
3800 /*
3801  *	SFP scn handler
3802  */
3803 static void
3804 bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3805 {
3806 	struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3807 
3808 	switch (rsp->event) {
3809 	case BFA_SFP_SCN_INSERTED:
3810 		sfp->state = BFA_SFP_STATE_INSERTED;
3811 		sfp->data_valid = 0;
3812 		bfa_sfp_scn_aen_post(sfp, rsp);
3813 		break;
3814 	case BFA_SFP_SCN_REMOVED:
3815 		sfp->state = BFA_SFP_STATE_REMOVED;
3816 		sfp->data_valid = 0;
3817 		bfa_sfp_scn_aen_post(sfp, rsp);
3818 		break;
3819 	case BFA_SFP_SCN_FAILED:
3820 		sfp->state = BFA_SFP_STATE_FAILED;
3821 		sfp->data_valid = 0;
3822 		bfa_sfp_scn_aen_post(sfp, rsp);
3823 		break;
3824 	case BFA_SFP_SCN_UNSUPPORT:
3825 		sfp->state = BFA_SFP_STATE_UNSUPPORT;
3826 		bfa_sfp_scn_aen_post(sfp, rsp);
3827 		if (!sfp->lock)
3828 			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3829 		break;
3830 	case BFA_SFP_SCN_POM:
3831 		bfa_sfp_scn_aen_post(sfp, rsp);
3832 		break;
3833 	case BFA_SFP_SCN_VALID:
3834 		sfp->state = BFA_SFP_STATE_VALID;
3835 		if (!sfp->lock)
3836 			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3837 		break;
3838 	default:
3839 		bfa_trc(sfp, rsp->event);
3840 		WARN_ON(1);
3841 	}
3842 }
3843 
3844 /*
3845  * SFP show complete
3846  */
3847 static void
3848 bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3849 {
3850 	struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3851 
3852 	if (!sfp->lock) {
3853 		/*
3854 		 * receiving response after ioc failure
3855 		 */
3856 		bfa_trc(sfp, sfp->lock);
3857 		return;
3858 	}
3859 
3860 	bfa_trc(sfp, rsp->status);
3861 	if (rsp->status == BFA_STATUS_OK) {
3862 		sfp->data_valid = 1;
3863 		if (sfp->state == BFA_SFP_STATE_VALID)
3864 			sfp->status = BFA_STATUS_OK;
3865 		else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3866 			sfp->status = BFA_STATUS_SFP_UNSUPP;
3867 		else
3868 			bfa_trc(sfp, sfp->state);
3869 	} else {
3870 		sfp->data_valid = 0;
3871 		sfp->status = rsp->status;
3872 		/* sfpshow shouldn't change sfp state */
3873 	}
3874 
3875 	bfa_trc(sfp, sfp->memtype);
3876 	if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3877 		bfa_trc(sfp, sfp->data_valid);
3878 		if (sfp->data_valid) {
3879 			u32	size = sizeof(struct sfp_mem_s);
3880 			u8 *des = (u8 *)(sfp->sfpmem);
3881 			memcpy(des, sfp->dbuf_kva, size);
3882 		}
3883 		/*
3884 		 * Queue completion callback.
3885 		 */
3886 		bfa_cb_sfp_show(sfp);
3887 	} else
3888 		sfp->lock = 0;
3889 
3890 	bfa_trc(sfp, sfp->state_query_lock);
3891 	if (sfp->state_query_lock) {
3892 		sfp->state = rsp->state;
3893 		/* Complete callback */
3894 		bfa_cb_sfp_state_query(sfp);
3895 	}
3896 }
3897 
3898 /*
3899  *	SFP query fw sfp state
3900  */
3901 static void
3902 bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3903 {
3904 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3905 
3906 	/* Should not be doing query if not in _INIT state */
3907 	WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3908 	WARN_ON(sfp->state_query_lock != 0);
3909 	bfa_trc(sfp, sfp->state);
3910 
3911 	sfp->state_query_lock = 1;
3912 	req->memtype = 0;
3913 
3914 	if (!sfp->lock)
3915 		bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3916 }
3917 
3918 static void
3919 bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3920 {
3921 	enum bfa_defs_sfp_media_e *media = sfp->media;
3922 
3923 	*media = BFA_SFP_MEDIA_UNKNOWN;
3924 
3925 	if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3926 		*media = BFA_SFP_MEDIA_UNSUPPORT;
3927 	else if (sfp->state == BFA_SFP_STATE_VALID) {
3928 		union sfp_xcvr_e10g_code_u e10g;
3929 		struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3930 		u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3931 				(sfpmem->srlid_base.xcvr[5] >> 1);
3932 
3933 		e10g.b = sfpmem->srlid_base.xcvr[0];
3934 		bfa_trc(sfp, e10g.b);
3935 		bfa_trc(sfp, xmtr_tech);
3936 		/* check fc transmitter tech */
3937 		if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3938 		    (xmtr_tech & SFP_XMTR_TECH_CP) ||
3939 		    (xmtr_tech & SFP_XMTR_TECH_CA))
3940 			*media = BFA_SFP_MEDIA_CU;
3941 		else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3942 			 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3943 			*media = BFA_SFP_MEDIA_EL;
3944 		else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3945 			 (xmtr_tech & SFP_XMTR_TECH_LC))
3946 			*media = BFA_SFP_MEDIA_LW;
3947 		else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3948 			 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3949 			 (xmtr_tech & SFP_XMTR_TECH_SA))
3950 			*media = BFA_SFP_MEDIA_SW;
3951 		/* Check 10G Ethernet Compliance code */
3952 		else if (e10g.r.e10g_sr)
3953 			*media = BFA_SFP_MEDIA_SW;
3954 		else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3955 			*media = BFA_SFP_MEDIA_LW;
3956 		else if (e10g.r.e10g_unall)
3957 			*media = BFA_SFP_MEDIA_UNKNOWN;
3958 		else
3959 			bfa_trc(sfp, 0);
3960 	} else
3961 		bfa_trc(sfp, sfp->state);
3962 }
3963 
3964 static bfa_status_t
3965 bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3966 {
3967 	struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3968 	struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3969 	union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3970 	union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3971 
3972 	if (portspeed == BFA_PORT_SPEED_10GBPS) {
3973 		if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3974 			return BFA_STATUS_OK;
3975 		else {
3976 			bfa_trc(sfp, e10g.b);
3977 			return BFA_STATUS_UNSUPP_SPEED;
3978 		}
3979 	}
3980 	if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3981 	    ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3982 	    ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3983 	    ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3984 	    ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3985 		return BFA_STATUS_OK;
3986 	else {
3987 		bfa_trc(sfp, portspeed);
3988 		bfa_trc(sfp, fc3.b);
3989 		bfa_trc(sfp, e10g.b);
3990 		return BFA_STATUS_UNSUPP_SPEED;
3991 	}
3992 }
3993 
3994 /*
3995  *	SFP hmbox handler
3996  */
3997 void
3998 bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3999 {
4000 	struct bfa_sfp_s *sfp = sfparg;
4001 
4002 	switch (msg->mh.msg_id) {
4003 	case BFI_SFP_I2H_SHOW:
4004 		bfa_sfp_show_comp(sfp, msg);
4005 		break;
4006 
4007 	case BFI_SFP_I2H_SCN:
4008 		bfa_sfp_scn(sfp, msg);
4009 		break;
4010 
4011 	default:
4012 		bfa_trc(sfp, msg->mh.msg_id);
4013 		WARN_ON(1);
4014 	}
4015 }
4016 
4017 /*
4018  *	Return DMA memory needed by sfp module.
4019  */
4020 u32
4021 bfa_sfp_meminfo(void)
4022 {
4023 	return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4024 }
4025 
4026 /*
4027  *	Attach virtual and physical memory for SFP.
4028  */
4029 void
4030 bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
4031 		struct bfa_trc_mod_s *trcmod)
4032 {
4033 	sfp->dev = dev;
4034 	sfp->ioc = ioc;
4035 	sfp->trcmod = trcmod;
4036 
4037 	sfp->cbfn = NULL;
4038 	sfp->cbarg = NULL;
4039 	sfp->sfpmem = NULL;
4040 	sfp->lock = 0;
4041 	sfp->data_valid = 0;
4042 	sfp->state = BFA_SFP_STATE_INIT;
4043 	sfp->state_query_lock = 0;
4044 	sfp->state_query_cbfn = NULL;
4045 	sfp->state_query_cbarg = NULL;
4046 	sfp->media = NULL;
4047 	sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
4048 	sfp->is_elb = BFA_FALSE;
4049 
4050 	bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
4051 	bfa_q_qe_init(&sfp->ioc_notify);
4052 	bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
4053 	list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
4054 }
4055 
4056 /*
4057  *	Claim Memory for SFP
4058  */
4059 void
4060 bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
4061 {
4062 	sfp->dbuf_kva   = dm_kva;
4063 	sfp->dbuf_pa    = dm_pa;
4064 	memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
4065 
4066 	dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4067 	dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4068 }
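
/*
 * Illustrative setup sketch (assumed order): the SFP module is
 * attached and then claims its DMA buffer; everything afterwards is
 * driven by mailbox events and the show/media/speed requests below:
 *
 *	bfa_sfp_attach(sfp, ioc, dev, trcmod);
 *	bfa_sfp_memclaim(sfp, dm_kva, dm_pa);
 */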
4069 
4070 /*
4071  * Show SFP eeprom content
4072  *
4073  * @param[in] sfp   - bfa sfp module
4074  *
4075  * @param[out] sfpmem - sfp eeprom data
4076  *
4077  */
4078 bfa_status_t
4079 bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
4080 		bfa_cb_sfp_t cbfn, void *cbarg)
4081 {
4082 
4083 	if (!bfa_ioc_is_operational(sfp->ioc)) {
4084 		bfa_trc(sfp, 0);
4085 		return BFA_STATUS_IOC_NON_OP;
4086 	}
4087 
4088 	if (sfp->lock) {
4089 		bfa_trc(sfp, 0);
4090 		return BFA_STATUS_DEVBUSY;
4091 	}
4092 
4093 	sfp->cbfn = cbfn;
4094 	sfp->cbarg = cbarg;
4095 	sfp->sfpmem = sfpmem;
4096 
4097 	bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
4098 	return BFA_STATUS_OK;
4099 }
4100 
4101 /*
4102  * Return SFP Media type
4103  *
4104  * @param[in] sfp   - bfa sfp module
4105  *
4106  * @param[out] media - media type of the inserted SFP
4107  *
4108  */
4109 bfa_status_t
4110 bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
4111 		bfa_cb_sfp_t cbfn, void *cbarg)
4112 {
4113 	if (!bfa_ioc_is_operational(sfp->ioc)) {
4114 		bfa_trc(sfp, 0);
4115 		return BFA_STATUS_IOC_NON_OP;
4116 	}
4117 
4118 	sfp->media = media;
4119 	if (sfp->state == BFA_SFP_STATE_INIT) {
4120 		if (sfp->state_query_lock) {
4121 			bfa_trc(sfp, 0);
4122 			return BFA_STATUS_DEVBUSY;
4123 		} else {
4124 			sfp->state_query_cbfn = cbfn;
4125 			sfp->state_query_cbarg = cbarg;
4126 			bfa_sfp_state_query(sfp);
4127 			return BFA_STATUS_SFP_NOT_READY;
4128 		}
4129 	}
4130 
4131 	bfa_sfp_media_get(sfp);
4132 	return BFA_STATUS_OK;
4133 }
4134 
4135 /*
4136  * Check if the user-set port speed is allowed by the SFP
4137  *
4138  * @param[in] sfp   - bfa sfp module
4139  * @param[in] portspeed - port speed from user
4140  *
4141  */
4142 bfa_status_t
4143 bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
4144 		bfa_cb_sfp_t cbfn, void *cbarg)
4145 {
4146 	WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
4147 
4148 	if (!bfa_ioc_is_operational(sfp->ioc))
4149 		return BFA_STATUS_IOC_NON_OP;
4150 
4151 	/* For Mezz card, all speeds are allowed */
4152 	if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
4153 		return BFA_STATUS_OK;
4154 
4155 	/* Check SFP state */
4156 	sfp->portspeed = portspeed;
4157 	if (sfp->state == BFA_SFP_STATE_INIT) {
4158 		if (sfp->state_query_lock) {
4159 			bfa_trc(sfp, 0);
4160 			return BFA_STATUS_DEVBUSY;
4161 		} else {
4162 			sfp->state_query_cbfn = cbfn;
4163 			sfp->state_query_cbarg = cbarg;
4164 			bfa_sfp_state_query(sfp);
4165 			return BFA_STATUS_SFP_NOT_READY;
4166 		}
4167 	}
4168 
4169 	if (sfp->state == BFA_SFP_STATE_REMOVED ||
4170 	    sfp->state == BFA_SFP_STATE_FAILED) {
4171 		bfa_trc(sfp, sfp->state);
4172 		return BFA_STATUS_NO_SFP_DEV;
4173 	}
4174 
4175 	if (sfp->state == BFA_SFP_STATE_INSERTED) {
4176 		bfa_trc(sfp, sfp->state);
4177 		return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
4178 	}
4179 
4180 	/* For eloopback, all speeds are allowed */
4181 	if (sfp->is_elb)
4182 		return BFA_STATUS_OK;
4183 
4184 	return bfa_sfp_speed_valid(sfp, portspeed);
4185 }
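
/*
 * A minimal caller sketch, not part of the driver: my_speed_cb and
 * my_check_speed_example are hypothetical names, and bfa_cb_sfp_t is
 * assumed to take (cbarg, status).  It shows the two-phase contract
 * above: an immediate verdict when the SFP state is known, or
 * BFA_STATUS_SFP_NOT_READY with the verdict delivered later through
 * the callback once the state query completes.
 */
static void
my_speed_cb(void *cbarg, bfa_status_t status)
{
	/* final verdict from the deferred state query */
}

static void
my_check_speed_example(struct bfa_sfp_s *sfp)
{
	bfa_status_t rc;

	rc = bfa_sfp_speed(sfp, BFA_PORT_SPEED_8GBPS, my_speed_cb, NULL);
	if (rc != BFA_STATUS_SFP_NOT_READY)
		return;		/* rc is already the answer */
	/* otherwise wait for my_speed_cb() */
}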
4186 
4187 /*
4188  *	Flash module specific
4189  */
4190 
4191 /*
4192  * FLASH DMA buffer should be big enough to hold both the MFG block and
4193  * the asic block (64k) at the same time, and should also be 2k aligned to
4194  * keep a write segment from crossing a sector boundary.
4195  */
4196 #define BFA_FLASH_SEG_SZ	2048
4197 #define BFA_FLASH_DMA_BUF_SZ	\
4198 	BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
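
/*
 * A worked instance of the sizing rule above (editorial sketch with a
 * stand-in ROUNDUP so it is self-contained; the 256-byte MFG block
 * size is made up purely for illustration).
 */
static inline u32
flash_dma_buf_sz_example(void)
{
	u32 mfg_sz = 256;	/* hypothetical sizeof(bfa_mfg_block_s) */
	u32 sz = (0x10000 + mfg_sz + 2048 - 1) & ~(2048 - 1);

	/* sz == 0x10800: 64k asic block + MFG block, 2k-segment aligned */
	return sz;
}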
4199 
4200 static void
4201 bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
4202 			int inst, int type)
4203 {
4204 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
4205 	struct bfa_aen_entry_s  *aen_entry;
4206 
4207 	bfad_get_aen_entry(bfad, aen_entry);
4208 	if (!aen_entry)
4209 		return;
4210 
4211 	aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
4212 	aen_entry->aen_data.audit.partition_inst = inst;
4213 	aen_entry->aen_data.audit.partition_type = type;
4214 
4215 	/* Send the AEN notification */
4216 	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
4217 				  BFA_AEN_CAT_AUDIT, event);
4218 }
4219 
4220 static void
4221 bfa_flash_cb(struct bfa_flash_s *flash)
4222 {
4223 	flash->op_busy = 0;
4224 	if (flash->cbfn)
4225 		flash->cbfn(flash->cbarg, flash->status);
4226 }
4227 
4228 static void
4229 bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
4230 {
4231 	struct bfa_flash_s	*flash = cbarg;
4232 
4233 	bfa_trc(flash, event);
4234 	switch (event) {
4235 	case BFA_IOC_E_DISABLED:
4236 	case BFA_IOC_E_FAILED:
4237 		if (flash->op_busy) {
4238 			flash->status = BFA_STATUS_IOC_FAILURE;
4239 			flash->cbfn(flash->cbarg, flash->status);
4240 			flash->op_busy = 0;
4241 		}
4242 		break;
4243 
4244 	default:
4245 		break;
4246 	}
4247 }
4248 
4249 /*
4250  * Send flash attribute query request.
4251  *
4252  * @param[in] cbarg - callback argument
4253  */
4254 static void
4255 bfa_flash_query_send(void *cbarg)
4256 {
4257 	struct bfa_flash_s *flash = cbarg;
4258 	struct bfi_flash_query_req_s *msg =
4259 			(struct bfi_flash_query_req_s *) flash->mb.msg;
4260 
4261 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4262 		bfa_ioc_portid(flash->ioc));
4263 	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4264 		flash->dbuf_pa);
4265 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4266 }
4267 
4268 /*
4269  * Send flash write request.
4270  *
4271  * @param[in] flash - flash structure
4272  */
4273 static void
4274 bfa_flash_write_send(struct bfa_flash_s *flash)
4275 {
4276 	struct bfi_flash_write_req_s *msg =
4277 			(struct bfi_flash_write_req_s *) flash->mb.msg;
4278 	u32	len;
4279 
4280 	msg->type = cpu_to_be32(flash->type);
4281 	msg->instance = flash->instance;
4282 	msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
4283 	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4284 		flash->residue : BFA_FLASH_DMA_BUF_SZ;
4285 	msg->length = cpu_to_be32(len);
4286 
4287 	/* indicate if it's the last msg of the whole write operation */
4288 	msg->last = (len == flash->residue) ? 1 : 0;
4289 
4290 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4291 			bfa_ioc_portid(flash->ioc));
4292 	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4293 	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4294 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4295 
4296 	flash->residue -= len;
4297 	flash->offset += len;
4298 }
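
/*
 * Editorial sketch of the chunking contract implemented above: each
 * request carries min(residue, BFA_FLASH_DMA_BUF_SZ) bytes and only
 * the final chunk sets 'last'.  In isolation:
 */
static u32
flash_chunk_count_example(u32 total)
{
	u32 residue = total, chunks = 0;

	while (residue) {
		u32 len = (residue < BFA_FLASH_DMA_BUF_SZ) ?
				residue : BFA_FLASH_DMA_BUF_SZ;

		/* msg->last is set when len == residue, i.e. on the
		 * final iteration; every earlier chunk is full-size
		 */
		residue -= len;
		chunks++;
	}
	return chunks;	/* == DIV_ROUND_UP(total, BFA_FLASH_DMA_BUF_SZ) */
}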
4299 
4300 /*
4301  * Send flash read request.
4302  *
4303  * @param[in] cbarg - callback argument
4304  */
4305 static void
4306 bfa_flash_read_send(void *cbarg)
4307 {
4308 	struct bfa_flash_s *flash = cbarg;
4309 	struct bfi_flash_read_req_s *msg =
4310 			(struct bfi_flash_read_req_s *) flash->mb.msg;
4311 	u32	len;
4312 
4313 	msg->type = cpu_to_be32(flash->type);
4314 	msg->instance = flash->instance;
4315 	msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
4316 	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4317 			flash->residue : BFA_FLASH_DMA_BUF_SZ;
4318 	msg->length = cpu_to_be32(len);
4319 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4320 		bfa_ioc_portid(flash->ioc));
4321 	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4322 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4323 }
4324 
4325 /*
4326  * Send flash erase request.
4327  *
4328  * @param[in] cbarg - callback argument
4329  */
4330 static void
4331 bfa_flash_erase_send(void *cbarg)
4332 {
4333 	struct bfa_flash_s *flash = cbarg;
4334 	struct bfi_flash_erase_req_s *msg =
4335 			(struct bfi_flash_erase_req_s *) flash->mb.msg;
4336 
4337 	msg->type = cpu_to_be32(flash->type);
4338 	msg->instance = flash->instance;
4339 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4340 			bfa_ioc_portid(flash->ioc));
4341 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4342 }
4343 
4344 /*
4345  * Process flash response messages upon receiving interrupts.
4346  *
4347  * @param[in] flasharg - flash structure
4348  * @param[in] msg - message structure
4349  */
4350 static void
4351 bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4352 {
4353 	struct bfa_flash_s *flash = flasharg;
4354 	u32	status;
4355 
4356 	union {
4357 		struct bfi_flash_query_rsp_s *query;
4358 		struct bfi_flash_erase_rsp_s *erase;
4359 		struct bfi_flash_write_rsp_s *write;
4360 		struct bfi_flash_read_rsp_s *read;
4361 		struct bfi_flash_event_s *event;
4362 		struct bfi_mbmsg_s   *msg;
4363 	} m;
4364 
4365 	m.msg = msg;
4366 	bfa_trc(flash, msg->mh.msg_id);
4367 
4368 	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4369 		/* receiving response after ioc failure */
4370 		bfa_trc(flash, 0x9999);
4371 		return;
4372 	}
4373 
4374 	switch (msg->mh.msg_id) {
4375 	case BFI_FLASH_I2H_QUERY_RSP:
4376 		status = be32_to_cpu(m.query->status);
4377 		bfa_trc(flash, status);
4378 		if (status == BFA_STATUS_OK) {
4379 			u32	i;
4380 			struct bfa_flash_attr_s *attr, *f;
4381 
4382 			attr = (struct bfa_flash_attr_s *) flash->ubuf;
4383 			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4384 			attr->status = be32_to_cpu(f->status);
4385 			attr->npart = be32_to_cpu(f->npart);
4386 			bfa_trc(flash, attr->status);
4387 			bfa_trc(flash, attr->npart);
4388 			for (i = 0; i < attr->npart; i++) {
4389 				attr->part[i].part_type =
4390 					be32_to_cpu(f->part[i].part_type);
4391 				attr->part[i].part_instance =
4392 					be32_to_cpu(f->part[i].part_instance);
4393 				attr->part[i].part_off =
4394 					be32_to_cpu(f->part[i].part_off);
4395 				attr->part[i].part_size =
4396 					be32_to_cpu(f->part[i].part_size);
4397 				attr->part[i].part_len =
4398 					be32_to_cpu(f->part[i].part_len);
4399 				attr->part[i].part_status =
4400 					be32_to_cpu(f->part[i].part_status);
4401 			}
4402 		}
4403 		flash->status = status;
4404 		bfa_flash_cb(flash);
4405 		break;
4406 	case BFI_FLASH_I2H_ERASE_RSP:
4407 		status = be32_to_cpu(m.erase->status);
4408 		bfa_trc(flash, status);
4409 		flash->status = status;
4410 		bfa_flash_cb(flash);
4411 		break;
4412 	case BFI_FLASH_I2H_WRITE_RSP:
4413 		status = be32_to_cpu(m.write->status);
4414 		bfa_trc(flash, status);
4415 		if (status != BFA_STATUS_OK || flash->residue == 0) {
4416 			flash->status = status;
4417 			bfa_flash_cb(flash);
4418 		} else {
4419 			bfa_trc(flash, flash->offset);
4420 			bfa_flash_write_send(flash);
4421 		}
4422 		break;
4423 	case BFI_FLASH_I2H_READ_RSP:
4424 		status = be32_to_cpu(m.read->status);
4425 		bfa_trc(flash, status);
4426 		if (status != BFA_STATUS_OK) {
4427 			flash->status = status;
4428 			bfa_flash_cb(flash);
4429 		} else {
4430 			u32 len = be32_to_cpu(m.read->length);
4431 			bfa_trc(flash, flash->offset);
4432 			bfa_trc(flash, len);
4433 			memcpy(flash->ubuf + flash->offset,
4434 				flash->dbuf_kva, len);
4435 			flash->residue -= len;
4436 			flash->offset += len;
4437 			if (flash->residue == 0) {
4438 				flash->status = status;
4439 				bfa_flash_cb(flash);
4440 			} else
4441 				bfa_flash_read_send(flash);
4442 		}
4443 		break;
4444 	case BFI_FLASH_I2H_BOOT_VER_RSP:
4445 		break;
4446 	case BFI_FLASH_I2H_EVENT:
4447 		status = be32_to_cpu(m.event->status);
4448 		bfa_trc(flash, status);
4449 		if (status == BFA_STATUS_BAD_FWCFG)
4450 			bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4451 		else if (status == BFA_STATUS_INVALID_VENDOR) {
4452 			u32 param;
4453 			param = be32_to_cpu(m.event->param);
4454 			bfa_trc(flash, param);
4455 			bfa_ioc_aen_post(flash->ioc,
4456 				BFA_IOC_AEN_INVALID_VENDOR);
4457 		}
4458 		break;
4459 
4460 	default:
4461 		WARN_ON(1);
4462 	}
4463 }
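
/*
 * Editorial note on the decode pattern above: a single union of
 * response-layout pointers is aimed at the raw message once, giving a
 * typed view per opcode without a cast at every use site.  A minimal
 * analogue (the ex_* types are illustrative, not driver types):
 */
struct ex_hdr	{ u8 msg_id; };
struct ex_rsp_a	{ struct ex_hdr mh; u32 status; };
struct ex_rsp_b	{ struct ex_hdr mh; u32 status, length; };

static u32
ex_decode(struct ex_hdr *raw)
{
	union {
		struct ex_rsp_a	*a;
		struct ex_rsp_b	*b;
		struct ex_hdr	*h;
	} m;

	m.h = raw;			/* one assignment covers all views */
	switch (raw->msg_id) {
	case 1: return m.a->status;
	case 2: return m.b->length;	/* per-opcode typed access */
	default: return 0;
	}
}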
4464 
4465 /*
4466  * Flash memory info API.
4467  *
4468  * @param[in] mincfg - minimal cfg variable
4469  */
4470 u32
4471 bfa_flash_meminfo(bfa_boolean_t mincfg)
4472 {
4473 	/* min driver doesn't need flash */
4474 	if (mincfg)
4475 		return 0;
4476 	return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4477 }
4478 
4479 /*
4480  * Flash attach API.
4481  *
4482  * @param[in] flash - flash structure
4483  * @param[in] ioc  - ioc structure
4484  * @param[in] dev  - device structure
4485  * @param[in] trcmod - trace module
4486  * @param[in] mincfg - minimal cfg variable
4487  */
4488 void
4489 bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4490 		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4491 {
4492 	flash->ioc = ioc;
4493 	flash->trcmod = trcmod;
4494 	flash->cbfn = NULL;
4495 	flash->cbarg = NULL;
4496 	flash->op_busy = 0;
4497 
4498 	bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4499 	bfa_q_qe_init(&flash->ioc_notify);
4500 	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4501 	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4502 
4503 	/* min driver doesn't need flash */
4504 	if (mincfg) {
4505 		flash->dbuf_kva = NULL;
4506 		flash->dbuf_pa = 0;
4507 	}
4508 }
4509 
4510 /*
4511  * Claim memory for flash
4512  *
4513  * @param[in] flash - flash structure
4514  * @param[in] dm_kva - pointer to virtual memory address
4515  * @param[in] dm_pa - physical memory address
4516  * @param[in] mincfg - minimal cfg variable
4517  */
4518 void
4519 bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4520 		bfa_boolean_t mincfg)
4521 {
4522 	if (mincfg)
4523 		return;
4524 
4525 	flash->dbuf_kva = dm_kva;
4526 	flash->dbuf_pa = dm_pa;
4527 	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4528 	dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4529 	dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4530 }
4531 
4532 /*
4533  * Get flash attribute.
4534  *
4535  * @param[in] flash - flash structure
4536  * @param[in] attr - flash attribute structure
4537  * @param[in] cbfn - callback function
4538  * @param[in] cbarg - callback argument
4539  *
4540  * Return status.
4541  */
4542 bfa_status_t
4543 bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4544 		bfa_cb_flash_t cbfn, void *cbarg)
4545 {
4546 	bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4547 
4548 	if (!bfa_ioc_is_operational(flash->ioc))
4549 		return BFA_STATUS_IOC_NON_OP;
4550 
4551 	if (flash->op_busy) {
4552 		bfa_trc(flash, flash->op_busy);
4553 		return BFA_STATUS_DEVBUSY;
4554 	}
4555 
4556 	flash->op_busy = 1;
4557 	flash->cbfn = cbfn;
4558 	flash->cbarg = cbarg;
4559 	flash->ubuf = (u8 *) attr;
4560 	bfa_flash_query_send(flash);
4561 
4562 	return BFA_STATUS_OK;
4563 }
4564 
4565 /*
4566  * Erase flash partition.
4567  *
4568  * @param[in] flash - flash structure
4569  * @param[in] type - flash partition type
4570  * @param[in] instance - flash partition instance
4571  * @param[in] cbfn - callback function
4572  * @param[in] cbarg - callback argument
4573  *
4574  * Return status.
4575  */
4576 bfa_status_t
4577 bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4578 		u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4579 {
4580 	bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4581 	bfa_trc(flash, type);
4582 	bfa_trc(flash, instance);
4583 
4584 	if (!bfa_ioc_is_operational(flash->ioc))
4585 		return BFA_STATUS_IOC_NON_OP;
4586 
4587 	if (flash->op_busy) {
4588 		bfa_trc(flash, flash->op_busy);
4589 		return BFA_STATUS_DEVBUSY;
4590 	}
4591 
4592 	flash->op_busy = 1;
4593 	flash->cbfn = cbfn;
4594 	flash->cbarg = cbarg;
4595 	flash->type = type;
4596 	flash->instance = instance;
4597 
4598 	bfa_flash_erase_send(flash);
4599 	bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4600 				instance, type);
4601 	return BFA_STATUS_OK;
4602 }
4603 
4604 /*
4605  * Update flash partition.
4606  *
4607  * @param[in] flash - flash structure
4608  * @param[in] type - flash partition type
4609  * @param[in] instance - flash partition instance
4610  * @param[in] buf - update data buffer
4611  * @param[in] len - data buffer length
4612  * @param[in] offset - offset relative to the partition starting address
4613  * @param[in] cbfn - callback function
4614  * @param[in] cbarg - callback argument
4615  *
4616  * Return status.
4617  */
4618 bfa_status_t
4619 bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4620 		u8 instance, void *buf, u32 len, u32 offset,
4621 		bfa_cb_flash_t cbfn, void *cbarg)
4622 {
4623 	bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4624 	bfa_trc(flash, type);
4625 	bfa_trc(flash, instance);
4626 	bfa_trc(flash, len);
4627 	bfa_trc(flash, offset);
4628 
4629 	if (!bfa_ioc_is_operational(flash->ioc))
4630 		return BFA_STATUS_IOC_NON_OP;
4631 
4632 	/*
4633 	 * 'len' must be on a word (4-byte) boundary
4634 	 * 'offset' must be on a sector (16KB) boundary
4635 	 */
4636 	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4637 		return BFA_STATUS_FLASH_BAD_LEN;
4638 
4639 	if (type == BFA_FLASH_PART_MFG)
4640 		return BFA_STATUS_EINVAL;
4641 
4642 	if (flash->op_busy) {
4643 		bfa_trc(flash, flash->op_busy);
4644 		return BFA_STATUS_DEVBUSY;
4645 	}
4646 
4647 	flash->op_busy = 1;
4648 	flash->cbfn = cbfn;
4649 	flash->cbarg = cbarg;
4650 	flash->type = type;
4651 	flash->instance = instance;
4652 	flash->residue = len;
4653 	flash->offset = 0;
4654 	flash->addr_off = offset;
4655 	flash->ubuf = buf;
4656 
4657 	bfa_flash_write_send(flash);
4658 	return BFA_STATUS_OK;
4659 }
4660 
4661 /*
4662  * Read flash partition.
4663  *
4664  * @param[in] flash - flash structure
4665  * @param[in] type - flash partition type
4666  * @param[in] instance - flash partition instance
4667  * @param[in] buf - read data buffer
4668  * @param[in] len - data buffer length
4669  * @param[in] offset - offset relative to the partition starting address
4670  * @param[in] cbfn - callback function
4671  * @param[in] cbarg - callback argument
4672  *
4673  * Return status.
4674  */
4675 bfa_status_t
4676 bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4677 		u8 instance, void *buf, u32 len, u32 offset,
4678 		bfa_cb_flash_t cbfn, void *cbarg)
4679 {
4680 	bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4681 	bfa_trc(flash, type);
4682 	bfa_trc(flash, instance);
4683 	bfa_trc(flash, len);
4684 	bfa_trc(flash, offset);
4685 
4686 	if (!bfa_ioc_is_operational(flash->ioc))
4687 		return BFA_STATUS_IOC_NON_OP;
4688 
4689 	/*
4690 	 * 'len' must be on a word (4-byte) boundary
4691 	 * 'offset' must be on a sector (16KB) boundary
4692 	 */
4693 	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4694 		return BFA_STATUS_FLASH_BAD_LEN;
4695 
4696 	if (flash->op_busy) {
4697 		bfa_trc(flash, flash->op_busy);
4698 		return BFA_STATUS_DEVBUSY;
4699 	}
4700 
4701 	flash->op_busy = 1;
4702 	flash->cbfn = cbfn;
4703 	flash->cbarg = cbarg;
4704 	flash->type = type;
4705 	flash->instance = instance;
4706 	flash->residue = len;
4707 	flash->offset = 0;
4708 	flash->addr_off = offset;
4709 	flash->ubuf = buf;
4710 	bfa_flash_read_send(flash);
4711 
4712 	return BFA_STATUS_OK;
4713 }
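
/*
 * Hypothetical caller sketch: my_read_done, the partition instance and
 * the sizes below are editorial, not driver code, and bfa_cb_flash_t
 * is assumed to take (cbarg, status).  len and offset obey the
 * word/sector rules checked above.
 */
static void
my_read_done(void *cbarg, bfa_status_t status)
{
	/* on BFA_STATUS_OK the caller's buffer now holds partition data */
}

static bfa_status_t
my_read_drv_part_example(struct bfa_flash_s *flash, void *buf)
{
	return bfa_flash_read_part(flash, BFA_FLASH_PART_DRV,
				   0,		/* instance */
				   buf,
				   0x4000,	/* 16KB: word aligned */
				   0,		/* sector-aligned offset */
				   my_read_done, NULL);
}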
4714 
4715 /*
4716  *	DIAG module specific
4717  */
4718 
4719 #define BFA_DIAG_MEMTEST_TOV	50000	/* memtest timeout in msec */
4720 #define CT2_BFA_DIAG_MEMTEST_TOV	(9*30*1000)  /* 4.5 min */
4721 
4722 /* IOC event handler */
4723 static void
4724 bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4725 {
4726 	struct bfa_diag_s *diag = diag_arg;
4727 
4728 	bfa_trc(diag, event);
4729 	bfa_trc(diag, diag->block);
4730 	bfa_trc(diag, diag->fwping.lock);
4731 	bfa_trc(diag, diag->tsensor.lock);
4732 
4733 	switch (event) {
4734 	case BFA_IOC_E_DISABLED:
4735 	case BFA_IOC_E_FAILED:
4736 		if (diag->fwping.lock) {
4737 			diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4738 			diag->fwping.cbfn(diag->fwping.cbarg,
4739 					diag->fwping.status);
4740 			diag->fwping.lock = 0;
4741 		}
4742 
4743 		if (diag->tsensor.lock) {
4744 			diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4745 			diag->tsensor.cbfn(diag->tsensor.cbarg,
4746 					   diag->tsensor.status);
4747 			diag->tsensor.lock = 0;
4748 		}
4749 
4750 		if (diag->block) {
4751 			if (diag->timer_active) {
4752 				bfa_timer_stop(&diag->timer);
4753 				diag->timer_active = 0;
4754 			}
4755 
4756 			diag->status = BFA_STATUS_IOC_FAILURE;
4757 			diag->cbfn(diag->cbarg, diag->status);
4758 			diag->block = 0;
4759 		}
4760 		break;
4761 
4762 	default:
4763 		break;
4764 	}
4765 }
4766 
4767 static void
4768 bfa_diag_memtest_done(void *cbarg)
4769 {
4770 	struct bfa_diag_s *diag = cbarg;
4771 	struct bfa_ioc_s  *ioc = diag->ioc;
4772 	struct bfa_diag_memtest_result *res = diag->result;
4773 	u32	loff = BFI_BOOT_MEMTEST_RES_ADDR;
4774 	u32	pgnum, pgoff, i;
4775 
4776 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4777 	pgoff = PSS_SMEM_PGOFF(loff);
4778 
4779 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4780 
4781 	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4782 			 sizeof(u32)); i++) {
4783 		/* read test result from smem */
4784 		*((u32 *) res + i) =
4785 			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4786 		loff += sizeof(u32);
4787 	}
4788 
4789 	/* Reset IOC fwstates to BFI_IOC_UNINIT */
4790 	bfa_ioc_reset_fwstate(ioc);
4791 
4792 	res->status = swab32(res->status);
4793 	bfa_trc(diag, res->status);
4794 
4795 	if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4796 		diag->status = BFA_STATUS_OK;
4797 	else {
4798 		diag->status = BFA_STATUS_MEMTEST_FAILED;
4799 		res->addr = swab32(res->addr);
4800 		res->exp = swab32(res->exp);
4801 		res->act = swab32(res->act);
4802 		res->err_status = swab32(res->err_status);
4803 		res->err_status1 = swab32(res->err_status1);
4804 		res->err_addr = swab32(res->err_addr);
4805 		bfa_trc(diag, res->addr);
4806 		bfa_trc(diag, res->exp);
4807 		bfa_trc(diag, res->act);
4808 		bfa_trc(diag, res->err_status);
4809 		bfa_trc(diag, res->err_status1);
4810 		bfa_trc(diag, res->err_addr);
4811 	}
4812 	diag->timer_active = 0;
4813 	diag->cbfn(diag->cbarg, diag->status);
4814 	diag->block = 0;
4815 }
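
/*
 * Editorial sketch of the byte-order fix-up above: results arrive as
 * raw 32-bit words from shared memory, and each field is swab32()'d
 * into host order afterwards, e.g.:
 */
static inline u32
memtest_word_example(void)
{
	u32 raw = 0x78563412;		/* as fetched from smem */

	return swab32(raw);		/* 0x12345678 in host order */
}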
4816 
4817 /*
4818  * Firmware ping
4819  */
4820 
4821 /*
4822  * Perform DMA test directly
4823  */
4824 static void
4825 diag_fwping_send(struct bfa_diag_s *diag)
4826 {
4827 	struct bfi_diag_fwping_req_s *fwping_req;
4828 	u32	i;
4829 
4830 	bfa_trc(diag, diag->fwping.dbuf_pa);
4831 
4832 	/* fill DMA area with pattern */
4833 	for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4834 		*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4835 
4836 	/* Fill mbox msg */
4837 	fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4838 
4839 	/* Setup SG list */
4840 	bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4841 			diag->fwping.dbuf_pa);
4842 	/* Set up dma count */
4843 	fwping_req->count = cpu_to_be32(diag->fwping.count);
4844 	/* Set up data pattern */
4845 	fwping_req->data = diag->fwping.data;
4846 
4847 	/* build host command */
4848 	bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4849 		bfa_ioc_portid(diag->ioc));
4850 
4851 	/* send mbox cmd */
4852 	bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4853 }
4854 
4855 static void
4856 diag_fwping_comp(struct bfa_diag_s *diag,
4857 		 struct bfi_diag_fwping_rsp_s *diag_rsp)
4858 {
4859 	u32	rsp_data = diag_rsp->data;
4860 	u8	rsp_dma_status = diag_rsp->dma_status;
4861 
4862 	bfa_trc(diag, rsp_data);
4863 	bfa_trc(diag, rsp_dma_status);
4864 
4865 	if (rsp_dma_status == BFA_STATUS_OK) {
4866 		u32	i, pat;
4867 		pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4868 			diag->fwping.data;
4869 		/* Check mbox data */
4870 		if (diag->fwping.data != rsp_data) {
4871 			bfa_trc(diag, rsp_data);
4872 			diag->fwping.result->dmastatus =
4873 					BFA_STATUS_DATACORRUPTED;
4874 			diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4875 			diag->fwping.cbfn(diag->fwping.cbarg,
4876 					diag->fwping.status);
4877 			diag->fwping.lock = 0;
4878 			return;
4879 		}
4880 		/* Check dma pattern */
4881 		for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4882 			if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4883 				bfa_trc(diag, i);
4884 				bfa_trc(diag, pat);
4885 				bfa_trc(diag,
4886 					*((u32 *)diag->fwping.dbuf_kva + i));
4887 				diag->fwping.result->dmastatus =
4888 						BFA_STATUS_DATACORRUPTED;
4889 				diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4890 				diag->fwping.cbfn(diag->fwping.cbarg,
4891 						diag->fwping.status);
4892 				diag->fwping.lock = 0;
4893 				return;
4894 			}
4895 		}
4896 		diag->fwping.result->dmastatus = BFA_STATUS_OK;
4897 		diag->fwping.status = BFA_STATUS_OK;
4898 		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4899 		diag->fwping.lock = 0;
4900 	} else {
4901 		diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4902 		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4903 		diag->fwping.lock = 0;
4904 	}
4905 }
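
/*
 * Editorial note on the expected-pattern selection above: the check
 * implies the firmware inverts the pattern on each ping, so after an
 * odd count the host expects ~data and after an even count, data.
 */
static inline u32
fwping_expected_pat_example(u32 data, u32 count)
{
	/* count = 3, data = 0xA5A5A5A5 -> expect 0x5A5A5A5A */
	return (count & 0x1) ? ~data : data;
}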
4906 
4907 /*
4908  * Temperature Sensor
4909  */
4910 
4911 static void
4912 diag_tempsensor_send(struct bfa_diag_s *diag)
4913 {
4914 	struct bfi_diag_ts_req_s *msg;
4915 
4916 	msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4917 	bfa_trc(diag, msg->temp);
4918 	/* build host command */
4919 	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4920 		bfa_ioc_portid(diag->ioc));
4921 	/* send mbox cmd */
4922 	bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4923 }
4924 
4925 static void
4926 diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4927 {
4928 	if (!diag->tsensor.lock) {
4929 		/* receiving response after ioc failure */
4930 		bfa_trc(diag, diag->tsensor.lock);
4931 		return;
4932 	}
4933 
4934 	/*
4935 	 * ASIC junction tempsensor is a register read operation;
4936 	 * it will always return OK
4937 	 */
4938 	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4939 	diag->tsensor.temp->ts_junc = rsp->ts_junc;
4940 	diag->tsensor.temp->ts_brd = rsp->ts_brd;
4941 
4942 	if (rsp->ts_brd) {
4943 		/* tsensor.temp->status is brd_temp status */
4944 		diag->tsensor.temp->status = rsp->status;
4945 		if (rsp->status == BFA_STATUS_OK) {
4946 			diag->tsensor.temp->brd_temp =
4947 				be16_to_cpu(rsp->brd_temp);
4948 		} else
4949 			diag->tsensor.temp->brd_temp = 0;
4950 	}
4951 
4952 	bfa_trc(diag, rsp->status);
4953 	bfa_trc(diag, rsp->ts_junc);
4954 	bfa_trc(diag, rsp->temp);
4955 	bfa_trc(diag, rsp->ts_brd);
4956 	bfa_trc(diag, rsp->brd_temp);
4957 
4958 	/* tsensor status is always good because we always have junction temp */
4959 	diag->tsensor.status = BFA_STATUS_OK;
4960 	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4961 	diag->tsensor.lock = 0;
4962 }
4963 
4964 /*
4965  *	LED Test command
4966  */
4967 static void
4968 diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4969 {
4970 	struct bfi_diag_ledtest_req_s  *msg;
4971 
4972 	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4973 	/* build host command */
4974 	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4975 			bfa_ioc_portid(diag->ioc));
4976 
4977 	/*
4978 	 * convert the freq from N blinks per 10 sec to
4979 	 * crossbow ontime value. We do it here because division is needed.
4980 	 */
4981 	if (ledtest->freq)
4982 		ledtest->freq = 500 / ledtest->freq;
4983 
4984 	if (ledtest->freq == 0)
4985 		ledtest->freq = 1;
4986 
4987 	bfa_trc(diag, ledtest->freq);
4988 	/* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4989 	msg->cmd = (u8) ledtest->cmd;
4990 	msg->color = (u8) ledtest->color;
4991 	msg->portid = bfa_ioc_portid(diag->ioc);
4992 	msg->led = ledtest->led;
4993 	msg->freq = cpu_to_be16(ledtest->freq);
4994 
4995 	/* send mbox cmd */
4996 	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4997 }
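
/*
 * Worked example of the frequency conversion above (editorial sketch):
 * a request of N blinks per 10 sec becomes an on-time value of 500/N,
 * clamped to a minimum of 1.
 */
static inline u16
ledtest_ontime_example(u16 freq)
{
	if (freq)
		freq = 500 / freq;
	if (freq == 0)		/* 0 input, or requests above 500 */
		freq = 1;
	return freq;		/* e.g. 10 blinks/10s -> 50 */
}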
4998 
4999 static void
5000 diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
5001 {
5002 	bfa_trc(diag, diag->ledtest.lock);
5003 	diag->ledtest.lock = BFA_FALSE;
5004 	/* no bfa_cb_queue is needed because driver is not waiting */
5005 }
5006 
5007 /*
5008  * Port beaconing
5009  */
5010 static void
5011 diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
5012 {
5013 	struct bfi_diag_portbeacon_req_s *msg;
5014 
5015 	msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
5016 	/* build host command */
5017 	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
5018 		bfa_ioc_portid(diag->ioc));
5019 	msg->beacon = beacon;
5020 	msg->period = cpu_to_be32(sec);
5021 	/* send mbox cmd */
5022 	bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
5023 }
5024 
5025 static void
5026 diag_portbeacon_comp(struct bfa_diag_s *diag)
5027 {
5028 	bfa_trc(diag, diag->beacon.state);
5029 	diag->beacon.state = BFA_FALSE;
5030 	if (diag->cbfn_beacon)
5031 		diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
5032 }
5033 
5034 /*
5035  *	Diag hmbox handler
5036  */
5037 void
5038 bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
5039 {
5040 	struct bfa_diag_s *diag = diagarg;
5041 
5042 	switch (msg->mh.msg_id) {
5043 	case BFI_DIAG_I2H_PORTBEACON:
5044 		diag_portbeacon_comp(diag);
5045 		break;
5046 	case BFI_DIAG_I2H_FWPING:
5047 		diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
5048 		break;
5049 	case BFI_DIAG_I2H_TEMPSENSOR:
5050 		diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
5051 		break;
5052 	case BFI_DIAG_I2H_LEDTEST:
5053 		diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
5054 		break;
5055 	default:
5056 		bfa_trc(diag, msg->mh.msg_id);
5057 		WARN_ON(1);
5058 	}
5059 }
5060 
5061 /*
5062  * Gen RAM Test
5063  *
5064  *   @param[in] *diag           - diag data struct
5065  *   @param[in] *memtest        - mem test params input from upper layer,
5066  *   @param[in] pattern         - mem test pattern
5067  *   @param[in] *result         - mem test result
5068  *   @param[in] cbfn            - mem test callback function
5069  *   @param[in] cbarg           - callback function arg
5070  *
5071  *   @param[out]
5072  */
5073 bfa_status_t
5074 bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
5075 		u32 pattern, struct bfa_diag_memtest_result *result,
5076 		bfa_cb_diag_t cbfn, void *cbarg)
5077 {
5078 	u32	memtest_tov;
5079 
5080 	bfa_trc(diag, pattern);
5081 
5082 	if (!bfa_ioc_adapter_is_disabled(diag->ioc))
5083 		return BFA_STATUS_ADAPTER_ENABLED;
5084 
5085 	/* check to see if there is another destructive diag cmd running */
5086 	if (diag->block) {
5087 		bfa_trc(diag, diag->block);
5088 		return BFA_STATUS_DEVBUSY;
5089 	} else
5090 		diag->block = 1;
5091 
5092 	diag->result = result;
5093 	diag->cbfn = cbfn;
5094 	diag->cbarg = cbarg;
5095 
5096 	/* download memtest code and take LPU0 out of reset */
5097 	bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
5098 
5099 	memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
5100 		       CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
5101 	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
5102 			bfa_diag_memtest_done, diag, memtest_tov);
5103 	diag->timer_active = 1;
5104 	return BFA_STATUS_OK;
5105 }
5106 
5107 /*
5108  * DIAG firmware ping command
5109  *
5110  *   @param[in] *diag           - diag data struct
5111  *   @param[in] cnt             - dma loop count for testing PCIE
5112  *   @param[in] data            - data pattern to pass in fw
5113  *   @param[in] *result         - pointer to bfa_diag_fwping_result_t data struct
5114  *   @param[in] cbfn            - callback function
5115  *   @param[in] *cbarg          - callback function arg
5116  *
5117  *   @param[out]
5118  */
5119 bfa_status_t
5120 bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
5121 		struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
5122 		void *cbarg)
5123 {
5124 	bfa_trc(diag, cnt);
5125 	bfa_trc(diag, data);
5126 
5127 	if (!bfa_ioc_is_operational(diag->ioc))
5128 		return BFA_STATUS_IOC_NON_OP;
5129 
5130 	if (bfa_asic_id_ct2(bfa_ioc_devid(diag->ioc)) &&
5131 	    (diag->ioc->clscode == BFI_PCIFN_CLASS_ETH))
5132 		return BFA_STATUS_CMD_NOTSUPP;
5133 
5134 	/* check to see if there is another destructive diag cmd running */
5135 	if (diag->block || diag->fwping.lock) {
5136 		bfa_trc(diag, diag->block);
5137 		bfa_trc(diag, diag->fwping.lock);
5138 		return BFA_STATUS_DEVBUSY;
5139 	}
5140 
5141 	/* Initialization */
5142 	diag->fwping.lock = 1;
5143 	diag->fwping.cbfn = cbfn;
5144 	diag->fwping.cbarg = cbarg;
5145 	diag->fwping.result = result;
5146 	diag->fwping.data = data;
5147 	diag->fwping.count = cnt;
5148 
5149 	/* Init test results */
5150 	diag->fwping.result->data = 0;
5151 	diag->fwping.result->status = BFA_STATUS_OK;
5152 
5153 	/* kick off the first ping */
5154 	diag_fwping_send(diag);
5155 	return BFA_STATUS_OK;
5156 }
5157 
5158 /*
5159  * Read Temperature Sensor
5160  *
5161  *   @param[in] *diag           - diag data struct
5162  *   @param[in] *result         - pointer to bfa_diag_temp_t data struct
5163  *   @param[in] cbfn            - callback function
5164  *   @param[in] *cbarg          - callback function arg
5165  *
5166  *   @param[out]
5167  */
5168 bfa_status_t
5169 bfa_diag_tsensor_query(struct bfa_diag_s *diag,
5170 		struct bfa_diag_results_tempsensor_s *result,
5171 		bfa_cb_diag_t cbfn, void *cbarg)
5172 {
5173 	/* check to see if there is a destructive diag cmd running */
5174 	if (diag->block || diag->tsensor.lock) {
5175 		bfa_trc(diag, diag->block);
5176 		bfa_trc(diag, diag->tsensor.lock);
5177 		return BFA_STATUS_DEVBUSY;
5178 	}
5179 
5180 	if (!bfa_ioc_is_operational(diag->ioc))
5181 		return BFA_STATUS_IOC_NON_OP;
5182 
5183 	/* Init diag mod params */
5184 	diag->tsensor.lock = 1;
5185 	diag->tsensor.temp = result;
5186 	diag->tsensor.cbfn = cbfn;
5187 	diag->tsensor.cbarg = cbarg;
5188 	diag->tsensor.status = BFA_STATUS_OK;
5189 
5190 	/* Send msg to fw */
5191 	diag_tempsensor_send(diag);
5192 
5193 	return BFA_STATUS_OK;
5194 }
5195 
5196 /*
5197  * LED Test command
5198  *
5199  *   @param[in] *diag           - diag data struct
5200  *   @param[in] *ledtest        - pointer to ledtest data structure
5201  *
5202  *   @param[out]
5203  */
5204 bfa_status_t
5205 bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
5206 {
5207 	bfa_trc(diag, ledtest->cmd);
5208 
5209 	if (!bfa_ioc_is_operational(diag->ioc))
5210 		return BFA_STATUS_IOC_NON_OP;
5211 
5212 	if (diag->beacon.state)
5213 		return BFA_STATUS_BEACON_ON;
5214 
5215 	if (diag->ledtest.lock)
5216 		return BFA_STATUS_LEDTEST_OP;
5217 
5218 	/* Send msg to fw */
5219 	diag->ledtest.lock = BFA_TRUE;
5220 	diag_ledtest_send(diag, ledtest);
5221 
5222 	return BFA_STATUS_OK;
5223 }
5224 
5225 /*
5226  * Port beaconing command
5227  *
5228  *   @param[in] *diag           - diag data struct
5229  *   @param[in] beacon          - port beaconing 1:ON   0:OFF
5230  *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
5231  *   @param[in] sec             - beaconing duration in seconds
5232  *
5233  *   @param[out]
5234  */
5235 bfa_status_t
5236 bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
5237 		bfa_boolean_t link_e2e_beacon, uint32_t sec)
5238 {
5239 	bfa_trc(diag, beacon);
5240 	bfa_trc(diag, link_e2e_beacon);
5241 	bfa_trc(diag, sec);
5242 
5243 	if (!bfa_ioc_is_operational(diag->ioc))
5244 		return BFA_STATUS_IOC_NON_OP;
5245 
5246 	if (diag->ledtest.lock)
5247 		return BFA_STATUS_LEDTEST_OP;
5248 
5249 	if (diag->beacon.state && beacon)       /* beacon already on */
5250 		return BFA_STATUS_BEACON_ON;
5251 
5252 	diag->beacon.state	= beacon;
5253 	diag->beacon.link_e2e	= link_e2e_beacon;
5254 	if (diag->cbfn_beacon)
5255 		diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
5256 
5257 	/* Send msg to fw */
5258 	diag_portbeacon_send(diag, beacon, sec);
5259 
5260 	return BFA_STATUS_OK;
5261 }
5262 
5263 /*
5264  * Return DMA memory needed by diag module.
5265  */
5266 u32
5267 bfa_diag_meminfo(void)
5268 {
5269 	return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5270 }
5271 
5272 /*
5273  *	Attach virtual and physical memory for Diag.
5274  */
5275 void
5276 bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5277 	bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5278 {
5279 	diag->dev = dev;
5280 	diag->ioc = ioc;
5281 	diag->trcmod = trcmod;
5282 
5283 	diag->block = 0;
5284 	diag->cbfn = NULL;
5285 	diag->cbarg = NULL;
5286 	diag->result = NULL;
5287 	diag->cbfn_beacon = cbfn_beacon;
5288 
5289 	bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5290 	bfa_q_qe_init(&diag->ioc_notify);
5291 	bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5292 	list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5293 }
5294 
5295 void
5296 bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5297 {
5298 	diag->fwping.dbuf_kva = dm_kva;
5299 	diag->fwping.dbuf_pa = dm_pa;
5300 	memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5301 }
5302 
5303 /*
5304  *	PHY module specific
5305  */
5306 #define BFA_PHY_DMA_BUF_SZ	0x02000         /* 8k dma buffer */
5307 #define BFA_PHY_LOCK_STATUS	0x018878        /* phy semaphore status reg */
5308 
5309 static void
5310 bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5311 {
5312 	int i, m = sz >> 2;
5313 
5314 	for (i = 0; i < m; i++)
5315 		obuf[i] = be32_to_cpu(ibuf[i]);
5316 }
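
/*
 * Editorial usage note for the helper above: sz is a byte count, hence
 * the >> 2 word conversion.  Typical use after a query response, as in
 * bfa_phy_intr() below:
 */
static inline void
phy_attr_swap_example(struct bfa_phy_attr_s *attr, void *dbuf_kva)
{
	/* convert the whole DMA image to host order in one call */
	bfa_phy_ntoh32((u32 *)attr, (u32 *)dbuf_kva,
			sizeof(struct bfa_phy_attr_s));
}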
5317 
5318 static bfa_boolean_t
5319 bfa_phy_present(struct bfa_phy_s *phy)
5320 {
5321 	return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5322 }
5323 
5324 static void
5325 bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5326 {
5327 	struct bfa_phy_s *phy = cbarg;
5328 
5329 	bfa_trc(phy, event);
5330 
5331 	switch (event) {
5332 	case BFA_IOC_E_DISABLED:
5333 	case BFA_IOC_E_FAILED:
5334 		if (phy->op_busy) {
5335 			phy->status = BFA_STATUS_IOC_FAILURE;
5336 			phy->cbfn(phy->cbarg, phy->status);
5337 			phy->op_busy = 0;
5338 		}
5339 		break;
5340 
5341 	default:
5342 		break;
5343 	}
5344 }
5345 
5346 /*
5347  * Send phy attribute query request.
5348  *
5349  * @param[in] cbarg - callback argument
5350  */
5351 static void
5352 bfa_phy_query_send(void *cbarg)
5353 {
5354 	struct bfa_phy_s *phy = cbarg;
5355 	struct bfi_phy_query_req_s *msg =
5356 			(struct bfi_phy_query_req_s *) phy->mb.msg;
5357 
5358 	msg->instance = phy->instance;
5359 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5360 		bfa_ioc_portid(phy->ioc));
5361 	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5362 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5363 }
5364 
5365 /*
5366  * Send phy write request.
5367  *
5368  * @param[in] cbarg - callback argument
5369  */
5370 static void
5371 bfa_phy_write_send(void *cbarg)
5372 {
5373 	struct bfa_phy_s *phy = cbarg;
5374 	struct bfi_phy_write_req_s *msg =
5375 			(struct bfi_phy_write_req_s *) phy->mb.msg;
5376 	u32	len;
5377 	u16	*buf, *dbuf;
5378 	int	i, sz;
5379 
5380 	msg->instance = phy->instance;
5381 	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5382 	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5383 			phy->residue : BFA_PHY_DMA_BUF_SZ;
5384 	msg->length = cpu_to_be32(len);
5385 
5386 	/* indicate if it's the last msg of the whole write operation */
5387 	msg->last = (len == phy->residue) ? 1 : 0;
5388 
5389 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5390 		bfa_ioc_portid(phy->ioc));
5391 	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5392 
5393 	buf = (u16 *) (phy->ubuf + phy->offset);
5394 	dbuf = (u16 *)phy->dbuf_kva;
5395 	sz = len >> 1;
5396 	for (i = 0; i < sz; i++)
5397 		dbuf[i] = cpu_to_be16(buf[i]);	/* fill DMA buffer from user data */
5398 
5399 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5400 
5401 	phy->residue -= len;
5402 	phy->offset += len;
5403 }
5404 
5405 /*
5406  * Send phy read request.
5407  *
5408  * @param[in] cbarg - callback argument
5409  */
5410 static void
5411 bfa_phy_read_send(void *cbarg)
5412 {
5413 	struct bfa_phy_s *phy = cbarg;
5414 	struct bfi_phy_read_req_s *msg =
5415 			(struct bfi_phy_read_req_s *) phy->mb.msg;
5416 	u32	len;
5417 
5418 	msg->instance = phy->instance;
5419 	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5420 	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5421 			phy->residue : BFA_PHY_DMA_BUF_SZ;
5422 	msg->length = cpu_to_be32(len);
5423 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5424 		bfa_ioc_portid(phy->ioc));
5425 	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5426 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5427 }
5428 
5429 /*
5430  * Send phy stats request.
5431  *
5432  * @param[in] cbarg - callback argument
5433  */
5434 static void
5435 bfa_phy_stats_send(void *cbarg)
5436 {
5437 	struct bfa_phy_s *phy = cbarg;
5438 	struct bfi_phy_stats_req_s *msg =
5439 			(struct bfi_phy_stats_req_s *) phy->mb.msg;
5440 
5441 	msg->instance = phy->instance;
5442 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5443 		bfa_ioc_portid(phy->ioc));
5444 	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5445 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5446 }
5447 
5448 /*
5449  * Phy memory info API.
5450  *
5451  * @param[in] mincfg - minimal cfg variable
5452  */
5453 u32
5454 bfa_phy_meminfo(bfa_boolean_t mincfg)
5455 {
5456 	/* min driver doesn't need phy */
5457 	if (mincfg)
5458 		return 0;
5459 
5460 	return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5461 }
5462 
5463 /*
5464  * Phy attach API.
5465  *
5466  * @param[in] phy - phy structure
5467  * @param[in] ioc  - ioc structure
5468  * @param[in] dev  - device structure
5469  * @param[in] trcmod - trace module
5470  * @param[in] mincfg - minimal cfg variable
5471  */
5472 void
5473 bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5474 		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5475 {
5476 	phy->ioc = ioc;
5477 	phy->trcmod = trcmod;
5478 	phy->cbfn = NULL;
5479 	phy->cbarg = NULL;
5480 	phy->op_busy = 0;
5481 
5482 	bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5483 	bfa_q_qe_init(&phy->ioc_notify);
5484 	bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5485 	list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5486 
5487 	/* min driver doesn't need phy */
5488 	if (mincfg) {
5489 		phy->dbuf_kva = NULL;
5490 		phy->dbuf_pa = 0;
5491 	}
5492 }
5493 
5494 /*
5495  * Claim memory for phy
5496  *
5497  * @param[in] phy - phy structure
5498  * @param[in] dm_kva - pointer to virtual memory address
5499  * @param[in] dm_pa - physical memory address
5500  * @param[in] mincfg - minimal cfg variable
5501  */
5502 void
5503 bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5504 		bfa_boolean_t mincfg)
5505 {
5506 	if (mincfg)
5507 		return;
5508 
5509 	phy->dbuf_kva = dm_kva;
5510 	phy->dbuf_pa = dm_pa;
5511 	memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5512 	dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5513 	dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5514 }
5515 
5516 bfa_boolean_t
5517 bfa_phy_busy(struct bfa_ioc_s *ioc)
5518 {
5519 	void __iomem	*rb;
5520 
5521 	rb = bfa_ioc_bar0(ioc);
5522 	return readl(rb + BFA_PHY_LOCK_STATUS);
5523 }
5524 
5525 /*
5526  * Get phy attribute.
5527  *
5528  * @param[in] phy - phy structure
5529  * @param[in] attr - phy attribute structure
5530  * @param[in] cbfn - callback function
5531  * @param[in] cbarg - callback argument
5532  *
5533  * Return status.
5534  */
5535 bfa_status_t
5536 bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5537 		struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5538 {
5539 	bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5540 	bfa_trc(phy, instance);
5541 
5542 	if (!bfa_phy_present(phy))
5543 		return BFA_STATUS_PHY_NOT_PRESENT;
5544 
5545 	if (!bfa_ioc_is_operational(phy->ioc))
5546 		return BFA_STATUS_IOC_NON_OP;
5547 
5548 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5549 		bfa_trc(phy, phy->op_busy);
5550 		return BFA_STATUS_DEVBUSY;
5551 	}
5552 
5553 	phy->op_busy = 1;
5554 	phy->cbfn = cbfn;
5555 	phy->cbarg = cbarg;
5556 	phy->instance = instance;
5557 	phy->ubuf = (uint8_t *) attr;
5558 	bfa_phy_query_send(phy);
5559 
5560 	return BFA_STATUS_OK;
5561 }
5562 
5563 /*
5564  * Get phy stats.
5565  *
5566  * @param[in] phy - phy structure
5567  * @param[in] instance - phy image instance
5568  * @param[in] stats - pointer to phy stats
5569  * @param[in] cbfn - callback function
5570  * @param[in] cbarg - callback argument
5571  *
5572  * Return status.
5573  */
5574 bfa_status_t
5575 bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5576 		struct bfa_phy_stats_s *stats,
5577 		bfa_cb_phy_t cbfn, void *cbarg)
5578 {
5579 	bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5580 	bfa_trc(phy, instance);
5581 
5582 	if (!bfa_phy_present(phy))
5583 		return BFA_STATUS_PHY_NOT_PRESENT;
5584 
5585 	if (!bfa_ioc_is_operational(phy->ioc))
5586 		return BFA_STATUS_IOC_NON_OP;
5587 
5588 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5589 		bfa_trc(phy, phy->op_busy);
5590 		return BFA_STATUS_DEVBUSY;
5591 	}
5592 
5593 	phy->op_busy = 1;
5594 	phy->cbfn = cbfn;
5595 	phy->cbarg = cbarg;
5596 	phy->instance = instance;
5597 	phy->ubuf = (u8 *) stats;
5598 	bfa_phy_stats_send(phy);
5599 
5600 	return BFA_STATUS_OK;
5601 }
5602 
5603 /*
5604  * Update phy image.
5605  *
5606  * @param[in] phy - phy structure
5607  * @param[in] instance - phy image instance
5608  * @param[in] buf - update data buffer
5609  * @param[in] len - data buffer length
5610  * @param[in] offset - offset relative to starting address
5611  * @param[in] cbfn - callback function
5612  * @param[in] cbarg - callback argument
5613  *
5614  * Return status.
5615  */
5616 bfa_status_t
5617 bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5618 		void *buf, u32 len, u32 offset,
5619 		bfa_cb_phy_t cbfn, void *cbarg)
5620 {
5621 	bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5622 	bfa_trc(phy, instance);
5623 	bfa_trc(phy, len);
5624 	bfa_trc(phy, offset);
5625 
5626 	if (!bfa_phy_present(phy))
5627 		return BFA_STATUS_PHY_NOT_PRESENT;
5628 
5629 	if (!bfa_ioc_is_operational(phy->ioc))
5630 		return BFA_STATUS_IOC_NON_OP;
5631 
5632 	/* 'len' must be on a word (4-byte) boundary */
5633 	if (!len || (len & 0x03))
5634 		return BFA_STATUS_FAILED;
5635 
5636 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5637 		bfa_trc(phy, phy->op_busy);
5638 		return BFA_STATUS_DEVBUSY;
5639 	}
5640 
5641 	phy->op_busy = 1;
5642 	phy->cbfn = cbfn;
5643 	phy->cbarg = cbarg;
5644 	phy->instance = instance;
5645 	phy->residue = len;
5646 	phy->offset = 0;
5647 	phy->addr_off = offset;
5648 	phy->ubuf = buf;
5649 
5650 	bfa_phy_write_send(phy);
5651 	return BFA_STATUS_OK;
5652 }
5653 
5654 /*
5655  * Read phy image.
5656  *
5657  * @param[in] phy - phy structure
5658  * @param[in] instance - phy image instance
5659  * @param[in] buf - read data buffer
5660  * @param[in] len - data buffer length
5661  * @param[in] offset - offset relative to starting address
5662  * @param[in] cbfn - callback function
5663  * @param[in] cbarg - callback argument
5664  *
5665  * Return status.
5666  */
5667 bfa_status_t
5668 bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5669 		void *buf, u32 len, u32 offset,
5670 		bfa_cb_phy_t cbfn, void *cbarg)
5671 {
5672 	bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5673 	bfa_trc(phy, instance);
5674 	bfa_trc(phy, len);
5675 	bfa_trc(phy, offset);
5676 
5677 	if (!bfa_phy_present(phy))
5678 		return BFA_STATUS_PHY_NOT_PRESENT;
5679 
5680 	if (!bfa_ioc_is_operational(phy->ioc))
5681 		return BFA_STATUS_IOC_NON_OP;
5682 
5683 	/* 'len' must be on a word (4-byte) boundary */
5684 	if (!len || (len & 0x03))
5685 		return BFA_STATUS_FAILED;
5686 
5687 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5688 		bfa_trc(phy, phy->op_busy);
5689 		return BFA_STATUS_DEVBUSY;
5690 	}
5691 
5692 	phy->op_busy = 1;
5693 	phy->cbfn = cbfn;
5694 	phy->cbarg = cbarg;
5695 	phy->instance = instance;
5696 	phy->residue = len;
5697 	phy->offset = 0;
5698 	phy->addr_off = offset;
5699 	phy->ubuf = buf;
5700 	bfa_phy_read_send(phy);
5701 
5702 	return BFA_STATUS_OK;
5703 }
5704 
5705 /*
5706  * Process phy response messages upon receiving interrupts.
5707  *
5708  * @param[in] phyarg - phy structure
5709  * @param[in] msg - message structure
5710  */
5711 void
5712 bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5713 {
5714 	struct bfa_phy_s *phy = phyarg;
5715 	u32	status;
5716 
5717 	union {
5718 		struct bfi_phy_query_rsp_s *query;
5719 		struct bfi_phy_stats_rsp_s *stats;
5720 		struct bfi_phy_write_rsp_s *write;
5721 		struct bfi_phy_read_rsp_s *read;
5722 		struct bfi_mbmsg_s   *msg;
5723 	} m;
5724 
5725 	m.msg = msg;
5726 	bfa_trc(phy, msg->mh.msg_id);
5727 
5728 	if (!phy->op_busy) {
5729 		/* receiving response after ioc failure */
5730 		bfa_trc(phy, 0x9999);
5731 		return;
5732 	}
5733 
5734 	switch (msg->mh.msg_id) {
5735 	case BFI_PHY_I2H_QUERY_RSP:
5736 		status = be32_to_cpu(m.query->status);
5737 		bfa_trc(phy, status);
5738 
5739 		if (status == BFA_STATUS_OK) {
5740 			struct bfa_phy_attr_s *attr =
5741 				(struct bfa_phy_attr_s *) phy->ubuf;
5742 			bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5743 					sizeof(struct bfa_phy_attr_s));
5744 			bfa_trc(phy, attr->status);
5745 			bfa_trc(phy, attr->length);
5746 		}
5747 
5748 		phy->status = status;
5749 		phy->op_busy = 0;
5750 		if (phy->cbfn)
5751 			phy->cbfn(phy->cbarg, phy->status);
5752 		break;
5753 	case BFI_PHY_I2H_STATS_RSP:
5754 		status = be32_to_cpu(m.stats->status);
5755 		bfa_trc(phy, status);
5756 
5757 		if (status == BFA_STATUS_OK) {
5758 			struct bfa_phy_stats_s *stats =
5759 				(struct bfa_phy_stats_s *) phy->ubuf;
5760 			bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5761 				sizeof(struct bfa_phy_stats_s));
5762 			bfa_trc(phy, stats->status);
5763 		}
5764 
5765 		phy->status = status;
5766 		phy->op_busy = 0;
5767 		if (phy->cbfn)
5768 			phy->cbfn(phy->cbarg, phy->status);
5769 		break;
5770 	case BFI_PHY_I2H_WRITE_RSP:
5771 		status = be32_to_cpu(m.write->status);
5772 		bfa_trc(phy, status);
5773 
5774 		if (status != BFA_STATUS_OK || phy->residue == 0) {
5775 			phy->status = status;
5776 			phy->op_busy = 0;
5777 			if (phy->cbfn)
5778 				phy->cbfn(phy->cbarg, phy->status);
5779 		} else {
5780 			bfa_trc(phy, phy->offset);
5781 			bfa_phy_write_send(phy);
5782 		}
5783 		break;
5784 	case BFI_PHY_I2H_READ_RSP:
5785 		status = be32_to_cpu(m.read->status);
5786 		bfa_trc(phy, status);
5787 
5788 		if (status != BFA_STATUS_OK) {
5789 			phy->status = status;
5790 			phy->op_busy = 0;
5791 			if (phy->cbfn)
5792 				phy->cbfn(phy->cbarg, phy->status);
5793 		} else {
5794 			u32 len = be32_to_cpu(m.read->length);
5795 			u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5796 			u16 *dbuf = (u16 *)phy->dbuf_kva;
5797 			int i, sz = len >> 1;
5798 
5799 			bfa_trc(phy, phy->offset);
5800 			bfa_trc(phy, len);
5801 
5802 			for (i = 0; i < sz; i++)
5803 				buf[i] = be16_to_cpu(dbuf[i]);
5804 
5805 			phy->residue -= len;
5806 			phy->offset += len;
5807 
5808 			if (phy->residue == 0) {
5809 				phy->status = status;
5810 				phy->op_busy = 0;
5811 				if (phy->cbfn)
5812 					phy->cbfn(phy->cbarg, phy->status);
5813 			} else
5814 				bfa_phy_read_send(phy);
5815 		}
5816 		break;
5817 	default:
5818 		WARN_ON(1);
5819 	}
5820 }
5821 
5822 /*
5823  * DCONF state machine events
5824  */
5825 enum bfa_dconf_event {
5826 	BFA_DCONF_SM_INIT		= 1,	/* dconf Init */
5827 	BFA_DCONF_SM_FLASH_COMP		= 2,	/* read/write to flash */
5828 	BFA_DCONF_SM_WR			= 3,	/* binding change, map */
5829 	BFA_DCONF_SM_TIMEOUT		= 4,	/* Start timer */
5830 	BFA_DCONF_SM_EXIT		= 5,	/* exit dconf module */
5831 	BFA_DCONF_SM_IOCDISABLE		= 6,	/* IOC disable event */
5832 };
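
/*
 * Editorial sketch of the state-machine convention the handlers below
 * follow: each state is a function of (object, event);
 * bfa_sm_set_state() stores the function pointer and
 * bfa_fsm_send_event() invokes it.  A minimal stand-alone analogue
 * (the ex_fsm names are illustrative):
 */
struct ex_fsm { void (*sm)(struct ex_fsm *, int); };

static void ex_sm_ready(struct ex_fsm *f, int event)
{
	/* terminal state for this sketch */
}

static void ex_sm_uninit(struct ex_fsm *f, int event)
{
	if (event == 1 /* INIT */)
		f->sm = ex_sm_ready;	/* cf. bfa_sm_set_state() */
}

/* driving it: f.sm = ex_sm_uninit; f.sm(&f, 1); now f.sm == ex_sm_ready */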
5833 
5834 /* forward declaration of DCONF state machine */
5835 static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5836 				enum bfa_dconf_event event);
5837 static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5838 				enum bfa_dconf_event event);
5839 static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5840 				enum bfa_dconf_event event);
5841 static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5842 				enum bfa_dconf_event event);
5843 static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5844 				enum bfa_dconf_event event);
5845 static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5846 				enum bfa_dconf_event event);
5847 static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5848 				enum bfa_dconf_event event);
5849 
5850 static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5851 static void bfa_dconf_timer(void *cbarg);
5852 static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5853 static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5854 
5855 /*
5856  * Beginning state of dconf module. Waiting for an event to start.
5857  */
5858 static void
5859 bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5860 {
5861 	bfa_status_t bfa_status;
5862 	bfa_trc(dconf->bfa, event);
5863 
5864 	switch (event) {
5865 	case BFA_DCONF_SM_INIT:
5866 		if (dconf->min_cfg) {
5867 			bfa_trc(dconf->bfa, dconf->min_cfg);
5868 			bfa_fsm_send_event(&dconf->bfa->iocfc,
5869 					IOCFC_E_DCONF_DONE);
5870 			return;
5871 		}
5872 		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5873 		bfa_timer_start(dconf->bfa, &dconf->timer,
5874 			bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
5875 		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5876 					BFA_FLASH_PART_DRV, dconf->instance,
5877 					dconf->dconf,
5878 					sizeof(struct bfa_dconf_s), 0,
5879 					bfa_dconf_init_cb, dconf->bfa);
5880 		if (bfa_status != BFA_STATUS_OK) {
5881 			bfa_timer_stop(&dconf->timer);
5882 			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5883 			bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5884 			return;
5885 		}
5886 		break;
5887 	case BFA_DCONF_SM_EXIT:
5888 		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
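		/* fall through */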
5889 	case BFA_DCONF_SM_IOCDISABLE:
5890 	case BFA_DCONF_SM_WR:
5891 	case BFA_DCONF_SM_FLASH_COMP:
5892 		break;
5893 	default:
5894 		bfa_sm_fault(dconf->bfa, event);
5895 	}
5896 }
5897 
5898 /*
5899  * Read flash for dconf entries and call back to the driver once done.
5900  */
5901 static void
5902 bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5903 			enum bfa_dconf_event event)
5904 {
5905 	bfa_trc(dconf->bfa, event);
5906 
5907 	switch (event) {
5908 	case BFA_DCONF_SM_FLASH_COMP:
5909 		bfa_timer_stop(&dconf->timer);
5910 		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5911 		break;
5912 	case BFA_DCONF_SM_TIMEOUT:
5913 		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5914 		bfa_ioc_suspend(&dconf->bfa->ioc);
5915 		break;
5916 	case BFA_DCONF_SM_EXIT:
5917 		bfa_timer_stop(&dconf->timer);
5918 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5919 		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5920 		break;
5921 	case BFA_DCONF_SM_IOCDISABLE:
5922 		bfa_timer_stop(&dconf->timer);
5923 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5924 		break;
5925 	default:
5926 		bfa_sm_fault(dconf->bfa, event);
5927 	}
5928 }
5929 
5930 /*
5931  * DCONF Module is in ready state. Has completed the initialization.
5932  */
5933 static void
5934 bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5935 {
5936 	bfa_trc(dconf->bfa, event);
5937 
5938 	switch (event) {
5939 	case BFA_DCONF_SM_WR:
5940 		bfa_timer_start(dconf->bfa, &dconf->timer,
5941 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5942 		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5943 		break;
5944 	case BFA_DCONF_SM_EXIT:
5945 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5946 		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5947 		break;
5948 	case BFA_DCONF_SM_INIT:
5949 	case BFA_DCONF_SM_IOCDISABLE:
5950 		break;
5951 	default:
5952 		bfa_sm_fault(dconf->bfa, event);
5953 	}
5954 }
5955 
5956 /*
5957  * Entries are dirty; write back to the flash.
5958  */
5959 
5960 static void
5961 bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5962 {
5963 	bfa_trc(dconf->bfa, event);
5964 
5965 	switch (event) {
5966 	case BFA_DCONF_SM_TIMEOUT:
5967 		bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5968 		bfa_dconf_flash_write(dconf);
5969 		break;
5970 	case BFA_DCONF_SM_WR:
5971 		bfa_timer_stop(&dconf->timer);
5972 		bfa_timer_start(dconf->bfa, &dconf->timer,
5973 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5974 		break;
5975 	case BFA_DCONF_SM_EXIT:
5976 		bfa_timer_stop(&dconf->timer);
5977 		bfa_timer_start(dconf->bfa, &dconf->timer,
5978 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5979 		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5980 		bfa_dconf_flash_write(dconf);
5981 		break;
5982 	case BFA_DCONF_SM_FLASH_COMP:
5983 		break;
5984 	case BFA_DCONF_SM_IOCDISABLE:
5985 		bfa_timer_stop(&dconf->timer);
5986 		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5987 		break;
5988 	default:
5989 		bfa_sm_fault(dconf->bfa, event);
5990 	}
5991 }
5992 
5993 /*
5994  * Sync the dconf entries to the flash.
5995  */
5996 static void
5997 bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5998 			enum bfa_dconf_event event)
5999 {
6000 	bfa_trc(dconf->bfa, event);
6001 
6002 	switch (event) {
6003 	case BFA_DCONF_SM_IOCDISABLE:
6004 	case BFA_DCONF_SM_FLASH_COMP:
6005 		bfa_timer_stop(&dconf->timer);
6006 		/* fall through */
6007 	case BFA_DCONF_SM_TIMEOUT:
6008 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6009 		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
6010 		break;
6011 	default:
6012 		bfa_sm_fault(dconf->bfa, event);
6013 	}
6014 }
6015 
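/*
 * A flash write is in progress; wait for its completion.
 */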
6016 static void
6017 bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
6018 {
6019 	bfa_trc(dconf->bfa, event);
6020 
6021 	switch (event) {
6022 	case BFA_DCONF_SM_FLASH_COMP:
6023 		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
6024 		break;
6025 	case BFA_DCONF_SM_WR:
6026 		bfa_timer_start(dconf->bfa, &dconf->timer,
6027 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6028 		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
6029 		break;
6030 	case BFA_DCONF_SM_EXIT:
6031 		bfa_timer_start(dconf->bfa, &dconf->timer,
6032 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6033 		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
6034 		break;
6035 	case BFA_DCONF_SM_IOCDISABLE:
6036 		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
6037 		break;
6038 	default:
6039 		bfa_sm_fault(dconf->bfa, event);
6040 	}
6041 }
6042 
6043 static void
6044 bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
6045 			enum bfa_dconf_event event)
6046 {
6047 	bfa_trc(dconf->bfa, event);
6048 
6049 	switch (event) {
6050 	case BFA_DCONF_SM_INIT:
6051 		bfa_timer_start(dconf->bfa, &dconf->timer,
6052 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6053 		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
6054 		break;
6055 	case BFA_DCONF_SM_EXIT:
6056 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6057 		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
6058 		break;
6059 	case BFA_DCONF_SM_IOCDISABLE:
6060 		break;
6061 	default:
6062 		bfa_sm_fault(dconf->bfa, event);
6063 	}
6064 }
6065 
6066 /*
6067  * Compute and return the memory needed by the DRV_CFG (dconf) module.
6068  */
6069 void
6070 bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
6071 		  struct bfa_s *bfa)
6072 {
6073 	struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
6074 
6075 	if (cfg->drvcfg.min_cfg)
6076 		bfa_mem_kva_setup(meminfo, dconf_kva,
6077 				sizeof(struct bfa_dconf_hdr_s));
6078 	else
6079 		bfa_mem_kva_setup(meminfo, dconf_kva,
6080 				sizeof(struct bfa_dconf_s));
6081 }
6082 
6083 void
6084 bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg)
6085 {
6086 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6087 
6088 	dconf->bfad = bfad;
6089 	dconf->bfa = bfa;
6090 	dconf->instance = bfa->ioc.port_id;
6091 	bfa_trc(bfa, dconf->instance);
6092 
6093 	dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
6094 	if (cfg->drvcfg.min_cfg) {
6095 		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
6096 		dconf->min_cfg = BFA_TRUE;
6097 	} else {
6098 		dconf->min_cfg = BFA_FALSE;
6099 		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
6100 	}
6101 
6102 	bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
6103 	bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6104 }
6105 
6106 static void
6107 bfa_dconf_init_cb(void *arg, bfa_status_t status)
6108 {
6109 	struct bfa_s *bfa = arg;
6110 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6111 
6112 	if (status == BFA_STATUS_OK) {
6113 		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
6114 		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
6115 			dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
6116 		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
6117 			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
6118 	}
6119 	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6120 	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
6121 }
6122 
6123 void
6124 bfa_dconf_modinit(struct bfa_s *bfa)
6125 {
6126 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6127 	bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
6128 }
6129 
6130 static void
bfa_dconf_timer(void *cbarg)
6131 {
6132 	struct bfa_dconf_mod_s *dconf = cbarg;
6133 	bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
6134 }
6135 
6136 void
6137 bfa_dconf_iocdisable(struct bfa_s *bfa)
6138 {
6139 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6140 	bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
6141 }
6142 
6143 static bfa_status_t
6144 bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
6145 {
6146 	bfa_status_t bfa_status;
6147 	bfa_trc(dconf->bfa, 0);
6148 
6149 	bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
6150 				BFA_FLASH_PART_DRV, dconf->instance,
6151 				dconf->dconf,  sizeof(struct bfa_dconf_s), 0,
6152 				bfa_dconf_cbfn, dconf);
6153 	WARN_ON(bfa_status != BFA_STATUS_OK);
6155 	bfa_trc(dconf->bfa, bfa_status);
6156 
6157 	return bfa_status;
6158 }
6159 
6160 bfa_status_t
6161 bfa_dconf_update(struct bfa_s *bfa)
6162 {
6163 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6164 	bfa_trc(dconf->bfa, 0);
6165 	if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
6166 		return BFA_STATUS_FAILED;
6167 
6168 	if (dconf->min_cfg) {
6169 		bfa_trc(dconf->bfa, dconf->min_cfg);
6170 		return BFA_STATUS_FAILED;
6171 	}
6172 
6173 	bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
6174 	return BFA_STATUS_OK;
6175 }
6176 
6177 static void
6178 bfa_dconf_cbfn(void *arg, bfa_status_t status)
6179 {
6180 	struct bfa_dconf_mod_s *dconf = arg;
6181 	WARN_ON(status);
6182 	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6183 }
6184 
6185 void
6186 bfa_dconf_modexit(struct bfa_s *bfa)
6187 {
6188 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6189 	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
6190 }
6191 
6192 /*
6193  * FRU specific functions
6194  */
6195 
6196 #define BFA_FRU_DMA_BUF_SZ	0x02000		/* 8k dma buffer */
6197 #define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
6198 #define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
6199 
6200 static void
6201 bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
6202 {
6203 	struct bfa_fru_s *fru = cbarg;
6204 
6205 	bfa_trc(fru, event);
6206 
6207 	switch (event) {
6208 	case BFA_IOC_E_DISABLED:
6209 	case BFA_IOC_E_FAILED:
6210 		if (fru->op_busy) {
6211 			fru->status = BFA_STATUS_IOC_FAILURE;
6212 			fru->cbfn(fru->cbarg, fru->status);
6213 			fru->op_busy = 0;
6214 		}
6215 		break;
6216 
6217 	default:
6218 		break;
6219 	}
6220 }
6221 
6222 /*
6223  * Send fru write request.
6224  *
6225  * @param[in] cbarg - callback argument
 * @param[in] msg_type - fru h2i message type
6226  */
6227 static void
6228 bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6229 {
6230 	struct bfa_fru_s *fru = cbarg;
6231 	struct bfi_fru_write_req_s *msg =
6232 			(struct bfi_fru_write_req_s *) fru->mb.msg;
6233 	u32 len;
6234 
6235 	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6236 	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6237 				fru->residue : BFA_FRU_DMA_BUF_SZ;
6238 	msg->length = cpu_to_be32(len);
6239 
6240 	/*
6241 	 * indicate if it's the last msg of the whole write operation
6242 	 */
6243 	msg->last = (len == fru->residue) ? 1 : 0;
6244 
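	/*
	 * pass the caller's trfr_cmpl flag only with the final fragment
	 */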
6245 	msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0;
6246 	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6247 	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6248 
6249 	memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
6250 	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6251 
6252 	fru->residue -= len;
6253 	fru->offset += len;
6254 }
6255 
6256 /*
6257  * Send fru read request.
6258  *
6259  * @param[in] cbarg - callback argument
 * @param[in] msg_type - fru h2i message type
6260  */
6261 static void
6262 bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6263 {
6264 	struct bfa_fru_s *fru = cbarg;
6265 	struct bfi_fru_read_req_s *msg =
6266 			(struct bfi_fru_read_req_s *) fru->mb.msg;
6267 	u32 len;
6268 
6269 	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6270 	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6271 				fru->residue : BFA_FRU_DMA_BUF_SZ;
6272 	msg->length = cpu_to_be32(len);
6273 	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6274 	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6275 	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6276 }
6277 
6278 /*
6279  * FRU memory info API.
6280  *
6281  * @param[in] mincfg - minimal cfg variable
6282  */
6283 u32
6284 bfa_fru_meminfo(bfa_boolean_t mincfg)
6285 {
6286 	/* min driver doesn't need fru */
6287 	if (mincfg)
6288 		return 0;
6289 
6290 	return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6291 }
6292 
6293 /*
6294  * FRU attach API.
6295  *
6296  * @param[in] fru - fru structure
6297  * @param[in] ioc  - ioc structure
6298  * @param[in] dev  - device structure
6299  * @param[in] trcmod - trace module
6300  * @param[in] mincfg - minimal cfg variable
6301  */
6302 void
6303 bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
6304 	struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
6305 {
6306 	fru->ioc = ioc;
6307 	fru->trcmod = trcmod;
6308 	fru->cbfn = NULL;
6309 	fru->cbarg = NULL;
6310 	fru->op_busy = 0;
6311 
6312 	bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
6313 	bfa_q_qe_init(&fru->ioc_notify);
6314 	bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
6315 	list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
6316 
6317 	/* min driver doesn't need fru */
6318 	if (mincfg) {
6319 		fru->dbuf_kva = NULL;
6320 		fru->dbuf_pa = 0;
6321 	}
6322 }
6323 
6324 /*
6325  * Claim memory for fru
6326  *
6327  * @param[in] fru - fru structure
6328  * @param[in] dm_kva - pointer to virtual memory address
6329  * @param[in] dm_pa - physical memory address
6330  * @param[in] mincfg - minimal cfg variable
6331  */
6332 void
6333 bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
6334 	bfa_boolean_t mincfg)
6335 {
6336 	if (mincfg)
6337 		return;
6338 
6339 	fru->dbuf_kva = dm_kva;
6340 	fru->dbuf_pa = dm_pa;
6341 	memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
6342 	dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6343 	dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6344 }
6345 
6346 /*
6347  * Update fru vpd image.
6348  *
6349  * @param[in] fru - fru structure
6350  * @param[in] buf - update data buffer
6351  * @param[in] len - data buffer length
6352  * @param[in] offset - offset relative to starting address
6353  * @param[in] cbfn - callback function
6354  * @param[in] cbarg - callback argument
6355  *
6356  * Return status.
6357  */
6358 bfa_status_t
6359 bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6360 		  bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl)
6361 {
6362 	bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6363 	bfa_trc(fru, len);
6364 	bfa_trc(fru, offset);
6365 
6366 	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 &&
6367 		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6368 		return BFA_STATUS_FRU_NOT_PRESENT;
6369 
6370 	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
6371 		return BFA_STATUS_CMD_NOTSUPP;
6372 
6373 	if (!bfa_ioc_is_operational(fru->ioc))
6374 		return BFA_STATUS_IOC_NON_OP;
6375 
6376 	if (fru->op_busy) {
6377 		bfa_trc(fru, fru->op_busy);
6378 		return BFA_STATUS_DEVBUSY;
6379 	}
6380 
6381 	fru->op_busy = 1;
6382 
6383 	fru->cbfn = cbfn;
6384 	fru->cbarg = cbarg;
6385 	fru->residue = len;
6386 	fru->offset = 0;
6387 	fru->addr_off = offset;
6388 	fru->ubuf = buf;
6389 	fru->trfr_cmpl = trfr_cmpl;
6390 
6391 	bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6392 
6393 	return BFA_STATUS_OK;
6394 }
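
/*
 * Example usage (a minimal sketch; my_fru_done() and the completion
 * wiring are hypothetical, not part of this driver):
 *
 *	static void my_fru_done(void *cbarg, bfa_status_t status)
 *	{
 *		complete((struct completion *)cbarg);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(comp);
 *
 *	if (bfa_fruvpd_update(fru, buf, len, 0, my_fru_done, &comp, 1) ==
 *	    BFA_STATUS_OK)
 *		wait_for_completion(&comp);
 */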
6395 
6396 /*
6397  * Read fru vpd image.
6398  *
6399  * @param[in] fru - fru structure
6400  * @param[in] buf - read data buffer
6401  * @param[in] len - data buffer length
6402  * @param[in] offset - offset relative to starting address
6403  * @param[in] cbfn - callback function
6404  * @param[in] cbarg - callback argument
6405  *
6406  * Return status.
6407  */
6408 bfa_status_t
6409 bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6410 		bfa_cb_fru_t cbfn, void *cbarg)
6411 {
6412 	bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
6413 	bfa_trc(fru, len);
6414 	bfa_trc(fru, offset);
6415 
6416 	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6417 		return BFA_STATUS_FRU_NOT_PRESENT;
6418 
6419 	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK &&
6420 		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6421 		return BFA_STATUS_CMD_NOTSUPP;
6422 
6423 	if (!bfa_ioc_is_operational(fru->ioc))
6424 		return BFA_STATUS_IOC_NON_OP;
6425 
6426 	if (fru->op_busy) {
6427 		bfa_trc(fru, fru->op_busy);
6428 		return BFA_STATUS_DEVBUSY;
6429 	}
6430 
6431 	fru->op_busy = 1;
6432 
6433 	fru->cbfn = cbfn;
6434 	fru->cbarg = cbarg;
6435 	fru->residue = len;
6436 	fru->offset = 0;
6437 	fru->addr_off = offset;
6438 	fru->ubuf = buf;
6439 	bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
6440 
6441 	return BFA_STATUS_OK;
6442 }
6443 
6444 /*
6445  * Get the maximum size of the fru vpd image.
6446  *
6447  * @param[in] fru - fru structure
6448  * @param[out] max_size - maximum size of fru vpd data
6449  *
6450  * Return status.
6451  */
6452 bfa_status_t
6453 bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
6454 {
6455 	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6456 		return BFA_STATUS_FRU_NOT_PRESENT;
6457 
6458 	if (!bfa_ioc_is_operational(fru->ioc))
6459 		return BFA_STATUS_IOC_NON_OP;
6460 
6461 	if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK ||
6462 		fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2)
6463 		*max_size = BFA_FRU_CHINOOK_MAX_SIZE;
6464 	else
6465 		return BFA_STATUS_CMD_NOTSUPP;
6466 	return BFA_STATUS_OK;
6467 }

6468 /*
6469  * tfru write.
6470  *
6471  * @param[in] fru - fru structure
6472  * @param[in] buf - update data buffer
6473  * @param[in] len - data buffer length
6474  * @param[in] offset - offset relative to starting address
6475  * @param[in] cbfn - callback function
6476  * @param[in] cbarg - callback argument
6477  *
6478  * Return status.
6479  */
6480 bfa_status_t
6481 bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6482 	       bfa_cb_fru_t cbfn, void *cbarg)
6483 {
6484 	bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
6485 	bfa_trc(fru, len);
6486 	bfa_trc(fru, offset);
6487 	bfa_trc(fru, *((u8 *) buf));
6488 
6489 	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6490 		return BFA_STATUS_FRU_NOT_PRESENT;
6491 
6492 	if (!bfa_ioc_is_operational(fru->ioc))
6493 		return BFA_STATUS_IOC_NON_OP;
6494 
6495 	if (fru->op_busy) {
6496 		bfa_trc(fru, fru->op_busy);
6497 		return BFA_STATUS_DEVBUSY;
6498 	}
6499 
6500 	fru->op_busy = 1;
6501 
6502 	fru->cbfn = cbfn;
6503 	fru->cbarg = cbarg;
6504 	fru->residue = len;
6505 	fru->offset = 0;
6506 	fru->addr_off = offset;
6507 	fru->ubuf = buf;
6508 
6509 	bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
6510 
6511 	return BFA_STATUS_OK;
6512 }
6513 
6514 /*
6515  * tfru read.
6516  *
6517  * @param[in] fru - fru structure
6518  * @param[in] buf - read data buffer
6519  * @param[in] len - data buffer length
6520  * @param[in] offset - offset relative to starting address
6521  * @param[in] cbfn - callback function
6522  * @param[in] cbarg - callback argument
6523  *
6524  * Return status.
6525  */
6526 bfa_status_t
6527 bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6528 	      bfa_cb_fru_t cbfn, void *cbarg)
6529 {
6530 	bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
6531 	bfa_trc(fru, len);
6532 	bfa_trc(fru, offset);
6533 
6534 	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6535 		return BFA_STATUS_FRU_NOT_PRESENT;
6536 
6537 	if (!bfa_ioc_is_operational(fru->ioc))
6538 		return BFA_STATUS_IOC_NON_OP;
6539 
6540 	if (fru->op_busy) {
6541 		bfa_trc(fru, fru->op_busy);
6542 		return BFA_STATUS_DEVBUSY;
6543 	}
6544 
6545 	fru->op_busy = 1;
6546 
6547 	fru->cbfn = cbfn;
6548 	fru->cbarg = cbarg;
6549 	fru->residue = len;
6550 	fru->offset = 0;
6551 	fru->addr_off = offset;
6552 	fru->ubuf = buf;
6553 	bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
6554 
6555 	return BFA_STATUS_OK;
6556 }
6557 
6558 /*
6559  * Process fru response messages upon receiving interrupts.
6560  *
6561  * @param[in] fruarg - fru structure
6562  * @param[in] msg - message structure
6563  */
6564 void
6565 bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
6566 {
6567 	struct bfa_fru_s *fru = fruarg;
6568 	struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
6569 	u32 status;
6570 
6571 	bfa_trc(fru, msg->mh.msg_id);
6572 
6573 	if (!fru->op_busy) {
6574 		/*
6575 		 * receiving response after ioc failure
6576 		 */
6577 		bfa_trc(fru, 0x9999);
6578 		return;
6579 	}
6580 
6581 	switch (msg->mh.msg_id) {
6582 	case BFI_FRUVPD_I2H_WRITE_RSP:
6583 	case BFI_TFRU_I2H_WRITE_RSP:
6584 		status = be32_to_cpu(rsp->status);
6585 		bfa_trc(fru, status);
6586 
6587 		if (status != BFA_STATUS_OK || fru->residue == 0) {
6588 			fru->status = status;
6589 			fru->op_busy = 0;
6590 			if (fru->cbfn)
6591 				fru->cbfn(fru->cbarg, fru->status);
6592 		} else {
6593 			bfa_trc(fru, fru->offset);
6594 			if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
6595 				bfa_fru_write_send(fru,
6596 					BFI_FRUVPD_H2I_WRITE_REQ);
6597 			else
6598 				bfa_fru_write_send(fru,
6599 					BFI_TFRU_H2I_WRITE_REQ);
6600 		}
6601 		break;
6602 	case BFI_FRUVPD_I2H_READ_RSP:
6603 	case BFI_TFRU_I2H_READ_RSP:
6604 		status = be32_to_cpu(rsp->status);
6605 		bfa_trc(fru, status);
6606 
6607 		if (status != BFA_STATUS_OK) {
6608 			fru->status = status;
6609 			fru->op_busy = 0;
6610 			if (fru->cbfn)
6611 				fru->cbfn(fru->cbarg, fru->status);
6612 		} else {
6613 			u32 len = be32_to_cpu(rsp->length);
6614 
6615 			bfa_trc(fru, fru->offset);
6616 			bfa_trc(fru, len);
6617 
6618 			memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
6619 			fru->residue -= len;
6620 			fru->offset += len;
6621 
6622 			if (fru->residue == 0) {
6623 				fru->status = status;
6624 				fru->op_busy = 0;
6625 				if (fru->cbfn)
6626 					fru->cbfn(fru->cbarg, fru->status);
6627 			} else {
6628 				if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
6629 					bfa_fru_read_send(fru,
6630 						BFI_FRUVPD_H2I_READ_REQ);
6631 				else
6632 					bfa_fru_read_send(fru,
6633 						BFI_TFRU_H2I_READ_REQ);
6634 			}
6635 		}
6636 		break;
6637 	default:
6638 		WARN_ON(1);
6639 	}
6640 }
6641 
6642 /*
6643  * register definitions
6644  */
6645 #define FLI_CMD_REG			0x0001d000
6646 #define FLI_RDDATA_REG			0x0001d010
6647 #define FLI_ADDR_REG			0x0001d004
6648 #define FLI_DEV_STATUS_REG		0x0001d014
6649 
6650 #define BFA_FLASH_FIFO_SIZE		128	/* fifo size */
6651 #define BFA_FLASH_CHECK_MAX		10000	/* max # of status check */
6652 #define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op check */
6653 #define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */
6654 
6655 enum bfa_flash_cmd {
6656 	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
6657 	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
6658 };
6659 
6660 /**
6661  * @brief hardware error definition
6662  */
6663 enum bfa_flash_err {
6664 	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */
6665 	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */
6666 	BFA_FLASH_BAD		= -3,	/*!< flash bad */
6667 	BFA_FLASH_BUSY		= -4,	/*!< flash busy */
6668 	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */
6669 	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */
6670 	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */
6671 	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */
6672 	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */
6673 };
6674 
6675 /**
6676  * @brief flash command register data structure
6677  */
6678 union bfa_flash_cmd_reg_u {
6679 	struct {
6680 #ifdef __BIG_ENDIAN
6681 		u32	act:1;
6682 		u32	rsv:1;
6683 		u32	write_cnt:9;
6684 		u32	read_cnt:9;
6685 		u32	addr_cnt:4;
6686 		u32	cmd:8;
6687 #else
6688 		u32	cmd:8;
6689 		u32	addr_cnt:4;
6690 		u32	read_cnt:9;
6691 		u32	write_cnt:9;
6692 		u32	rsv:1;
6693 		u32	act:1;
6694 #endif
6695 	} r;
6696 	u32	i;
6697 };
6698 
6699 /**
6700  * @brief flash device status register data structure
6701  */
6702 union bfa_flash_dev_status_reg_u {
6703 	struct {
6704 #ifdef __BIG_ENDIAN
6705 		u32	rsv:21;
6706 		u32	fifo_cnt:6;
6707 		u32	busy:1;
6708 		u32	init_status:1;
6709 		u32	present:1;
6710 		u32	bad:1;
6711 		u32	good:1;
6712 #else
6713 		u32	good:1;
6714 		u32	bad:1;
6715 		u32	present:1;
6716 		u32	init_status:1;
6717 		u32	busy:1;
6718 		u32	fifo_cnt:6;
6719 		u32	rsv:21;
6720 #endif
6721 	} r;
6722 	u32	i;
6723 };
6724 
6725 /**
6726  * @brief flash address register data structure
6727  */
6728 union bfa_flash_addr_reg_u {
6729 	struct {
6730 #ifdef __BIG_ENDIAN
6731 		u32	addr:24;
6732 		u32	dummy:8;
6733 #else
6734 		u32	dummy:8;
6735 		u32	addr:24;
6736 #endif
6737 	} r;
6738 	u32	i;
6739 };
6740 
6741 /**
6742  * Flash raw private functions
6743  */
6744 static void
6745 bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
6746 		  u8 rd_cnt, u8 ad_cnt, u8 op)
6747 {
6748 	union bfa_flash_cmd_reg_u cmd;
6749 
6750 	cmd.i = 0;
6751 	cmd.r.act = 1;
6752 	cmd.r.write_cnt = wr_cnt;
6753 	cmd.r.read_cnt = rd_cnt;
6754 	cmd.r.addr_cnt = ad_cnt;
6755 	cmd.r.cmd = op;
6756 	writel(cmd.i, (pci_bar + FLI_CMD_REG));
6757 }
6758 
6759 static void
6760 bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
6761 {
6762 	union bfa_flash_addr_reg_u addr;
6763 
6764 	addr.r.addr = address & 0x00ffffff;
6765 	addr.r.dummy = 0;
6766 	writel(addr.i, (pci_bar + FLI_ADDR_REG));
6767 }
6768 
6769 static int
6770 bfa_flash_cmd_act_check(void __iomem *pci_bar)
6771 {
6772 	union bfa_flash_cmd_reg_u cmd;
6773 
6774 	cmd.i = readl(pci_bar + FLI_CMD_REG);
6775 
6776 	if (cmd.r.act)
6777 		return BFA_FLASH_ERR_CMD_ACT;
6778 
6779 	return 0;
6780 }
6781 
6782 /**
6783  * @brief
6784  * Flush FLI data fifo.
6785  *
6786  * @param[in] pci_bar - pci bar address
6787  * @param[in] dev_status - device status
6788  *
6789  * Return 0 on success, negative error number on error.
6790  */
6791 static int
6792 bfa_flash_fifo_flush(void __iomem *pci_bar)
6793 {
6794 	u32 i;
6796 	union bfa_flash_dev_status_reg_u dev_status;
6797 
6798 	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6799 
6800 	if (!dev_status.r.fifo_cnt)
6801 		return 0;
6802 
6803 	/* fifo counter in terms of words */
6804 	for (i = 0; i < dev_status.r.fifo_cnt; i++)
6805 		(void) readl(pci_bar + FLI_RDDATA_REG);
6806 
6807 	/*
6808 	 * Check the device status. It may take some time.
6809 	 */
6810 	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6811 		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6812 		if (!dev_status.r.fifo_cnt)
6813 			break;
6814 	}
6815 
6816 	if (dev_status.r.fifo_cnt)
6817 		return BFA_FLASH_ERR_FIFO_CNT;
6818 
6819 	return 0;
6820 }
6821 
6822 /**
6823  * @brief
6824  * Read flash status.
6825  *
6826  * @param[in] pci_bar - pci bar address
6827  *
6828  * Return the flash device status byte on success, negative error
 * number on error.
6829  */
6830 static int
6831 bfa_flash_status_read(void __iomem *pci_bar)
6832 {
6833 	union bfa_flash_dev_status_reg_u	dev_status;
6834 	int				status;
6835 	u32			ret_status;
6836 	int				i;
6837 
6838 	status = bfa_flash_fifo_flush(pci_bar);
6839 	if (status < 0)
6840 		return status;
6841 
6842 	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
6843 
6844 	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6845 		status = bfa_flash_cmd_act_check(pci_bar);
6846 		if (!status)
6847 			break;
6848 	}
6849 
6850 	if (status)
6851 		return status;
6852 
6853 	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6854 	if (!dev_status.r.fifo_cnt)
6855 		return BFA_FLASH_BUSY;
6856 
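	/* the status byte arrives in the most significant byte of the word */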
6857 	ret_status = readl(pci_bar + FLI_RDDATA_REG);
6858 	ret_status >>= 24;
6859 
6860 	status = bfa_flash_fifo_flush(pci_bar);
6861 	if (status < 0)
6862 		return status;
6863 
6864 	return ret_status;
6865 }
6866 
6867 /**
6868  * @brief
6869  * Start flash read operation.
6870  *
6871  * @param[in] pci_bar - pci bar address
6872  * @param[in] offset - flash address offset
6873  * @param[in] len - read data length
6874  * @param[in] buf - read data buffer
6875  *
6876  * Return 0 on success, negative error number on error.
6877  */
6878 static int
6879 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
6880 			 char *buf)
6881 {
6882 	int status;
6883 
6884 	/*
6885 	 * len must be a multiple of 4 and must not exceed the fifo size
6886 	 */
6887 	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
6888 		return BFA_FLASH_ERR_LEN;
6889 
6890 	/*
6891 	 * check status
6892 	 */
6893 	status = bfa_flash_status_read(pci_bar);
6894 	if (status == BFA_FLASH_BUSY)
6895 		status = bfa_flash_status_read(pci_bar);
6896 
6897 	if (status < 0)
6898 		return status;
6899 
6900 	/*
6901 	 * check if write-in-progress bit is cleared
6902 	 */
6903 	if (status & BFA_FLASH_WIP_MASK)
6904 		return BFA_FLASH_ERR_WIP;
6905 
6906 	bfa_flash_set_addr(pci_bar, offset);
6907 
6908 	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
6909 
6910 	return 0;
6911 }
6912 
6913 /**
6914  * @brief
6915  * Check flash read operation.
6916  *
6917  * @param[in] pci_bar - pci bar address
6918  *
6919  * Return flash device status, 1 if busy, 0 if not.
6920  */
6921 static u32
6922 bfa_flash_read_check(void __iomem *pci_bar)
6923 {
6924 	if (bfa_flash_cmd_act_check(pci_bar))
6925 		return 1;
6926 
6927 	return 0;
6928 }

6929 /**
6930  * @brief
6931  * End flash read operation.
6932  *
6933  * @param[in] pci_bar - pci bar address
6934  * @param[in] len - read data length
6935  * @param[in] buf - read data buffer
6936  *
6938 static void
6939 bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
6940 {
6942 	u32 i;
6943 
6944 	/*
6945 	 * read data fifo up to 32 words
6946 	 */
6947 	for (i = 0; i < len; i += 4) {
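		/* each fifo word is byte-swapped before being stored into buf */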
6948 		u32 w = readl(pci_bar + FLI_RDDATA_REG);
6949 		*((u32 *) (buf + i)) = swab32(w);
6950 	}
6951 
6952 	bfa_flash_fifo_flush(pci_bar);
6953 }
6954 
6968 #define FLASH_BLOCKING_OP_MAX   500
6969 #define FLASH_SEM_LOCK_REG	0x18820
6970 
6971 static int
6972 bfa_raw_sem_get(void __iomem *bar)
6973 {
6974 	int	locked;
6975 
6976 	locked = readl((bar + FLASH_SEM_LOCK_REG));
6977 	return !locked;
6979 }
6980 
6981 bfa_status_t
6982 bfa_flash_sem_get(void __iomem *bar)
6983 {
6984 	u32 n = FLASH_BLOCKING_OP_MAX;
6985 
6986 	while (!bfa_raw_sem_get(bar)) {
6987 		if (--n <= 0)
6988 			return BFA_STATUS_BADFLASH;
6989 		mdelay(10);
6990 	}
6991 	return BFA_STATUS_OK;
6992 }
6993 
6994 void
6995 bfa_flash_sem_put(void __iomem *bar)
6996 {
6997 	writel(0, (bar + FLASH_SEM_LOCK_REG));
6998 }
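
/*
 * Callers must pair bfa_flash_sem_get() with bfa_flash_sem_put() around
 * raw flash access, e.g. (a minimal sketch):
 *
 *	if (bfa_flash_sem_get(bar) != BFA_STATUS_OK)
 *		return BFA_STATUS_BADFLASH;
 *	... FLI register access ...
 *	bfa_flash_sem_put(bar);
 */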
6999 
/**
 * @brief
 * Perform flash raw read.
 *
 * @param[in] pci_bar - pci bar address
 * @param[in] offset - flash partition address offset
 * @param[in] buf - read data buffer
 * @param[in] len - read data length
 *
 * Return status.
 */
7000 bfa_status_t
7001 bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
7002 		       u32 len)
7003 {
7004 	u32 n;
7005 	int status;
7006 	u32 off, l, s, residue, fifo_sz;
7007 
7008 	residue = len;
7009 	off = 0;
7010 	fifo_sz = BFA_FLASH_FIFO_SIZE;
7011 	status = bfa_flash_sem_get(pci_bar);
7012 	if (status != BFA_STATUS_OK)
7013 		return status;
7014 
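	/*
	 * issue the read in chunks that never cross a fifo-size
	 * boundary: s is the absolute flash offset, n the fifo-aligned
	 * block containing it, and l the bytes remaining in that block
	 */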
7015 	while (residue) {
7016 		s = offset + off;
7017 		n = s / fifo_sz;
7018 		l = (n + 1) * fifo_sz - s;
7019 		if (l > residue)
7020 			l = residue;
7021 
7022 		status = bfa_flash_read_start(pci_bar, offset + off, l,
7023 								&buf[off]);
7024 		if (status < 0) {
7025 			bfa_flash_sem_put(pci_bar);
7026 			return BFA_STATUS_FAILED;
7027 		}
7028 
7029 		n = BFA_FLASH_BLOCKING_OP_MAX;
7030 		while (bfa_flash_read_check(pci_bar)) {
7031 			if (--n <= 0) {
7032 				bfa_flash_sem_put(pci_bar);
7033 				return BFA_STATUS_FAILED;
7034 			}
7035 		}
7036 
7037 		bfa_flash_read_end(pci_bar, l, &buf[off]);
7038 
7039 		residue -= l;
7040 		off += l;
7041 	}
7042 	bfa_flash_sem_put(pci_bar);
7043 
7044 	return BFA_STATUS_OK;
7045 }
7046