1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 
19 #include "bfa_ioc.h"
20 #include "bfi_reg.h"
21 #include "bfa_defs.h"
22 
23 /* IOC local definitions */
24 
/* ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details. */
26 
27 #define bfa_ioc_firmware_lock(__ioc)			\
28 			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
29 #define bfa_ioc_firmware_unlock(__ioc)			\
30 			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
31 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
32 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
33 #define bfa_ioc_notify_fail(__ioc)			\
34 			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
35 #define bfa_ioc_sync_start(__ioc)               \
36 			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
37 #define bfa_ioc_sync_join(__ioc)			\
38 			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
39 #define bfa_ioc_sync_leave(__ioc)			\
40 			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
41 #define bfa_ioc_sync_ack(__ioc)				\
42 			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
43 #define bfa_ioc_sync_complete(__ioc)			\
44 			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
45 
46 #define bfa_ioc_mbox_cmd_pending(__ioc)		\
47 			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
48 			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
49 
50 static bool bfa_nw_auto_recover = true;
51 
52 /*
53  * forward declarations
54  */
55 static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
56 static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
57 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
58 static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
59 static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
60 static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
61 static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
62 static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
63 static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
64 static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
65 static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
66 static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
67 static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
68 static void bfa_ioc_recover(struct bfa_ioc *ioc);
69 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
70 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
71 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
72 static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
73 static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
74 static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
75 static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
76 static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
77 static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
78 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
79 static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
80 			 u32 boot_param);
81 static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
82 static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
83 						char *serial_num);
84 static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
85 						char *fw_ver);
86 static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
87 						char *chip_rev);
88 static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
89 						char *optrom_ver);
90 static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
91 						char *manufacturer);
92 static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
93 static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
94 
95 /* IOC state machine definitions/declarations */
96 enum ioc_event {
97 	IOC_E_RESET		= 1,	/*!< IOC reset request		*/
98 	IOC_E_ENABLE		= 2,	/*!< IOC enable request		*/
99 	IOC_E_DISABLE		= 3,	/*!< IOC disable request	*/
100 	IOC_E_DETACH		= 4,	/*!< driver detach cleanup	*/
101 	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
102 	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
103 	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
104 	IOC_E_PFFAILED		= 8,	/*!< failure notice by iocpf sm	*/
105 	IOC_E_HBFAIL		= 9,	/*!< heartbeat failure		*/
106 	IOC_E_HWERROR		= 10,	/*!< hardware error interrupt	*/
107 	IOC_E_TIMEOUT		= 11,	/*!< timeout			*/
108 	IOC_E_HWFAILED		= 12,	/*!< PCI mapping failure notice	*/
109 };
110 
111 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
112 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
113 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
114 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
115 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
116 bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
117 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
118 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
119 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
120 bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
121 
122 static struct bfa_sm_table ioc_sm_table[] = {
123 	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
124 	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
125 	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
126 	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
127 	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
128 	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
129 	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
130 	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
131 	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
132 	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
133 };
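
/*
 * The table above maps each state-handler function to its externally
 * visible IOC state. A minimal sketch of the lookup, assuming a helper
 * of the usual bfa_sm_to_state() shape from the shared state-machine
 * headers:
 *
 *	int i = 0;
 *
 *	while (ioc_sm_table[i].sm && ioc_sm_table[i].sm != sm)
 *		i++;
 *	return ioc_sm_table[i].state;
 */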
134 
135 /*
 * Forward declarations for the iocpf state machine
137  */
138 static void bfa_iocpf_enable(struct bfa_ioc *ioc);
139 static void bfa_iocpf_disable(struct bfa_ioc *ioc);
140 static void bfa_iocpf_fail(struct bfa_ioc *ioc);
141 static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
142 static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
143 static void bfa_iocpf_stop(struct bfa_ioc *ioc);
144 
145 /* IOCPF state machine events */
146 enum iocpf_event {
147 	IOCPF_E_ENABLE		= 1,	/*!< IOCPF enable request	*/
148 	IOCPF_E_DISABLE		= 2,	/*!< IOCPF disable request	*/
149 	IOCPF_E_STOP		= 3,	/*!< stop on driver detach	*/
150 	IOCPF_E_FWREADY		= 4,	/*!< f/w initialization done	*/
151 	IOCPF_E_FWRSP_ENABLE	= 5,	/*!< enable f/w response	*/
152 	IOCPF_E_FWRSP_DISABLE	= 6,	/*!< disable f/w response	*/
153 	IOCPF_E_FAIL		= 7,	/*!< failure notice by ioc sm	*/
154 	IOCPF_E_INITFAIL	= 8,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*!< getattr fail notice by ioc sm */
156 	IOCPF_E_SEMLOCKED	= 10,   /*!< h/w semaphore is locked	*/
157 	IOCPF_E_TIMEOUT		= 11,   /*!< f/w response timeout	*/
158 	IOCPF_E_SEM_ERROR	= 12,   /*!< h/w sem mapping error	*/
159 };
160 
161 /* IOCPF states */
162 enum bfa_iocpf_state {
163 	BFA_IOCPF_RESET		= 1,	/*!< IOC is in reset state */
164 	BFA_IOCPF_SEMWAIT	= 2,	/*!< Waiting for IOC h/w semaphore */
165 	BFA_IOCPF_HWINIT	= 3,	/*!< IOC h/w is being initialized */
166 	BFA_IOCPF_READY		= 4,	/*!< IOCPF is initialized */
167 	BFA_IOCPF_INITFAIL	= 5,	/*!< IOCPF failed */
168 	BFA_IOCPF_FAIL		= 6,	/*!< IOCPF failed */
169 	BFA_IOCPF_DISABLING	= 7,	/*!< IOCPF is being disabled */
170 	BFA_IOCPF_DISABLED	= 8,	/*!< IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/*!< IOC f/w different from driver's */
172 };
173 
174 bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
175 bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
176 bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
177 bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
178 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
179 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
180 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
181 bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
182 						enum iocpf_event);
183 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
184 bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
185 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
186 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
187 bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
188 						enum iocpf_event);
189 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
190 
191 static struct bfa_sm_table iocpf_sm_table[] = {
192 	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
193 	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
194 	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
195 	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
196 	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
197 	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
198 	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
199 	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
200 	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
201 	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
202 	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
203 	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
204 	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
205 	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
206 };
207 
208 /* IOC State Machine */
209 
210 /* Beginning state. IOC uninit state. */
211 static void
212 bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
213 {
214 }
215 
216 /* IOC is in uninit state. */
217 static void
218 bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
219 {
220 	switch (event) {
221 	case IOC_E_RESET:
222 		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
223 		break;
224 
225 	default:
226 		bfa_sm_fault(event);
227 	}
228 }
229 
230 /* Reset entry actions -- initialize state machine */
231 static void
232 bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
233 {
234 	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
235 }
236 
237 /* IOC is in reset state. */
238 static void
239 bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
240 {
241 	switch (event) {
242 	case IOC_E_ENABLE:
243 		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
244 		break;
245 
246 	case IOC_E_DISABLE:
247 		bfa_ioc_disable_comp(ioc);
248 		break;
249 
250 	case IOC_E_DETACH:
251 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
252 		break;
253 
254 	default:
255 		bfa_sm_fault(event);
256 	}
257 }
258 
259 static void
260 bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
261 {
262 	bfa_iocpf_enable(ioc);
263 }
264 
265 /* Host IOC function is being enabled, awaiting response from firmware.
266  * Semaphore is acquired.
267  */
268 static void
269 bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
270 {
271 	switch (event) {
272 	case IOC_E_ENABLED:
273 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
274 		break;
275 
276 	case IOC_E_PFFAILED:
277 		/* !!! fall through !!! */
278 	case IOC_E_HWERROR:
279 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
280 		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
281 		if (event != IOC_E_PFFAILED)
282 			bfa_iocpf_initfail(ioc);
283 		break;
284 
285 	case IOC_E_HWFAILED:
286 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
287 		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
288 		break;
289 
290 	case IOC_E_DISABLE:
291 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
292 		break;
293 
294 	case IOC_E_DETACH:
295 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
296 		bfa_iocpf_stop(ioc);
297 		break;
298 
299 	case IOC_E_ENABLE:
300 		break;
301 
302 	default:
303 		bfa_sm_fault(event);
304 	}
305 }
306 
/* Request IOC attributes from firmware and start the getattr timeout. */
308 static void
309 bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
310 {
311 	mod_timer(&ioc->ioc_timer, jiffies +
312 		msecs_to_jiffies(BFA_IOC_TOV));
313 	bfa_ioc_send_getattr(ioc);
314 }
315 
316 /* IOC configuration in progress. Timer is active. */
317 static void
318 bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
319 {
320 	switch (event) {
321 	case IOC_E_FWRSP_GETATTR:
322 		del_timer(&ioc->ioc_timer);
323 		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
324 		break;
325 
326 	case IOC_E_PFFAILED:
327 	case IOC_E_HWERROR:
328 		del_timer(&ioc->ioc_timer);
329 		/* fall through */
330 	case IOC_E_TIMEOUT:
331 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
332 		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
333 		if (event != IOC_E_PFFAILED)
334 			bfa_iocpf_getattrfail(ioc);
335 		break;
336 
337 	case IOC_E_DISABLE:
338 		del_timer(&ioc->ioc_timer);
339 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
340 		break;
341 
342 	case IOC_E_ENABLE:
343 		break;
344 
345 	default:
346 		bfa_sm_fault(event);
347 	}
348 }
349 
350 static void
351 bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
352 {
353 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
354 	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
355 	bfa_ioc_hb_monitor(ioc);
356 }
357 
358 static void
359 bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
360 {
361 	switch (event) {
362 	case IOC_E_ENABLE:
363 		break;
364 
365 	case IOC_E_DISABLE:
366 		bfa_ioc_hb_stop(ioc);
367 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
368 		break;
369 
370 	case IOC_E_PFFAILED:
371 	case IOC_E_HWERROR:
372 		bfa_ioc_hb_stop(ioc);
373 		/* !!! fall through !!! */
374 	case IOC_E_HBFAIL:
375 		if (ioc->iocpf.auto_recover)
376 			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
377 		else
378 			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
379 
380 		bfa_ioc_fail_notify(ioc);
381 
382 		if (event != IOC_E_PFFAILED)
383 			bfa_iocpf_fail(ioc);
384 		break;
385 
386 	default:
387 		bfa_sm_fault(event);
388 	}
389 }
390 
391 static void
392 bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
393 {
394 	bfa_iocpf_disable(ioc);
395 }
396 
397 /* IOC is being disabled */
398 static void
399 bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
400 {
401 	switch (event) {
402 	case IOC_E_DISABLED:
403 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
404 		break;
405 
406 	case IOC_E_HWERROR:
		/*
		 * No state change. Move to disabled state after the
		 * iocpf sm completes failure processing.
		 */
412 		bfa_iocpf_fail(ioc);
413 		break;
414 
415 	case IOC_E_HWFAILED:
416 		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
417 		bfa_ioc_disable_comp(ioc);
418 		break;
419 
420 	default:
421 		bfa_sm_fault(event);
422 	}
423 }
424 
425 /* IOC disable completion entry. */
426 static void
427 bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
428 {
429 	bfa_ioc_disable_comp(ioc);
430 }
431 
432 static void
433 bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
434 {
435 	switch (event) {
436 	case IOC_E_ENABLE:
437 		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
438 		break;
439 
440 	case IOC_E_DISABLE:
441 		ioc->cbfn->disable_cbfn(ioc->bfa);
442 		break;
443 
444 	case IOC_E_DETACH:
445 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
446 		bfa_iocpf_stop(ioc);
447 		break;
448 
449 	default:
450 		bfa_sm_fault(event);
451 	}
452 }
453 
454 static void
455 bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
456 {
457 }
458 
459 /* Hardware initialization retry. */
460 static void
461 bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
462 {
463 	switch (event) {
464 	case IOC_E_ENABLED:
465 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
466 		break;
467 
468 	case IOC_E_PFFAILED:
469 	case IOC_E_HWERROR:
470 		/**
471 		 * Initialization retry failed.
472 		 */
473 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
474 		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
475 		if (event != IOC_E_PFFAILED)
476 			bfa_iocpf_initfail(ioc);
477 		break;
478 
479 	case IOC_E_HWFAILED:
480 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
481 		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
482 		break;
483 
484 	case IOC_E_ENABLE:
485 		break;
486 
487 	case IOC_E_DISABLE:
488 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
489 		break;
490 
491 	case IOC_E_DETACH:
492 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
493 		bfa_iocpf_stop(ioc);
494 		break;
495 
496 	default:
497 		bfa_sm_fault(event);
498 	}
499 }
500 
501 static void
502 bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
503 {
504 }
505 
506 /* IOC failure. */
507 static void
508 bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
509 {
510 	switch (event) {
511 	case IOC_E_ENABLE:
512 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
513 		break;
514 
515 	case IOC_E_DISABLE:
516 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
517 		break;
518 
519 	case IOC_E_DETACH:
520 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
521 		bfa_iocpf_stop(ioc);
522 		break;
523 
524 	case IOC_E_HWERROR:
525 		/* HB failure notification, ignore. */
526 		break;
527 
528 	default:
529 		bfa_sm_fault(event);
530 	}
531 }
532 
533 static void
534 bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
535 {
536 }
537 
/* IOC hardware failure. */
539 static void
540 bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
541 {
	switch (event) {
	case IOC_E_ENABLE:
545 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
546 		break;
547 
548 	case IOC_E_DISABLE:
549 		ioc->cbfn->disable_cbfn(ioc->bfa);
550 		break;
551 
552 	case IOC_E_DETACH:
553 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
554 		break;
555 
556 	default:
557 		bfa_sm_fault(event);
558 	}
559 }
560 
561 /* IOCPF State Machine */
562 
563 /* Reset entry actions -- initialize state machine */
564 static void
565 bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
566 {
567 	iocpf->fw_mismatch_notified = false;
568 	iocpf->auto_recover = bfa_nw_auto_recover;
569 }
570 
571 /* Beginning state. IOC is in reset state. */
572 static void
573 bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
574 {
575 	switch (event) {
576 	case IOCPF_E_ENABLE:
577 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
578 		break;
579 
580 	case IOCPF_E_STOP:
581 		break;
582 
583 	default:
584 		bfa_sm_fault(event);
585 	}
586 }
587 
588 /* Semaphore should be acquired for version check. */
589 static void
590 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
591 {
592 	bfa_ioc_hw_sem_init(iocpf->ioc);
593 	bfa_ioc_hw_sem_get(iocpf->ioc);
594 }
595 
596 /* Awaiting h/w semaphore to continue with version check. */
597 static void
598 bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
599 {
600 	struct bfa_ioc *ioc = iocpf->ioc;
601 
602 	switch (event) {
603 	case IOCPF_E_SEMLOCKED:
604 		if (bfa_ioc_firmware_lock(ioc)) {
605 			if (bfa_ioc_sync_start(ioc)) {
606 				bfa_ioc_sync_join(ioc);
607 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
608 			} else {
609 				bfa_ioc_firmware_unlock(ioc);
610 				bfa_nw_ioc_hw_sem_release(ioc);
611 				mod_timer(&ioc->sem_timer, jiffies +
612 					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
613 			}
614 		} else {
615 			bfa_nw_ioc_hw_sem_release(ioc);
616 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
617 		}
618 		break;
619 
620 	case IOCPF_E_SEM_ERROR:
621 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
622 		bfa_ioc_pf_hwfailed(ioc);
623 		break;
624 
625 	case IOCPF_E_DISABLE:
626 		bfa_ioc_hw_sem_get_cancel(ioc);
627 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
628 		bfa_ioc_pf_disabled(ioc);
629 		break;
630 
631 	case IOCPF_E_STOP:
632 		bfa_ioc_hw_sem_get_cancel(ioc);
633 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
634 		break;
635 
636 	default:
637 		bfa_sm_fault(event);
638 	}
639 }
640 
/* Notify enable completion callback on firmware mismatch. */
642 static void
643 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
644 {
645 	/* Call only the first time sm enters fwmismatch state. */
646 	if (!iocpf->fw_mismatch_notified)
647 		bfa_ioc_pf_fwmismatch(iocpf->ioc);
648 
649 	iocpf->fw_mismatch_notified = true;
650 	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
651 		msecs_to_jiffies(BFA_IOC_TOV));
652 }
653 
654 /* Awaiting firmware version match. */
655 static void
656 bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
657 {
658 	struct bfa_ioc *ioc = iocpf->ioc;
659 
660 	switch (event) {
661 	case IOCPF_E_TIMEOUT:
662 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
663 		break;
664 
665 	case IOCPF_E_DISABLE:
666 		del_timer(&ioc->iocpf_timer);
667 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
668 		bfa_ioc_pf_disabled(ioc);
669 		break;
670 
671 	case IOCPF_E_STOP:
672 		del_timer(&ioc->iocpf_timer);
673 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
674 		break;
675 
676 	default:
677 		bfa_sm_fault(event);
678 	}
679 }
680 
681 /* Request for semaphore. */
682 static void
683 bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
684 {
685 	bfa_ioc_hw_sem_get(iocpf->ioc);
686 }
687 
/* Awaiting semaphore for h/w initialization. */
689 static void
690 bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
691 {
692 	struct bfa_ioc *ioc = iocpf->ioc;
693 
694 	switch (event) {
695 	case IOCPF_E_SEMLOCKED:
696 		if (bfa_ioc_sync_complete(ioc)) {
697 			bfa_ioc_sync_join(ioc);
698 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
699 		} else {
700 			bfa_nw_ioc_hw_sem_release(ioc);
701 			mod_timer(&ioc->sem_timer, jiffies +
702 				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
703 		}
704 		break;
705 
706 	case IOCPF_E_SEM_ERROR:
707 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
708 		bfa_ioc_pf_hwfailed(ioc);
709 		break;
710 
711 	case IOCPF_E_DISABLE:
712 		bfa_ioc_hw_sem_get_cancel(ioc);
713 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
714 		break;
715 
716 	default:
717 		bfa_sm_fault(event);
718 	}
719 }
720 
721 static void
722 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
723 {
724 	iocpf->poll_time = 0;
725 	bfa_ioc_reset(iocpf->ioc, false);
726 }
727 
728 /* Hardware is being initialized. Interrupts are enabled.
729  * Holding hardware semaphore lock.
730  */
731 static void
732 bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
733 {
734 	struct bfa_ioc *ioc = iocpf->ioc;
735 
736 	switch (event) {
737 	case IOCPF_E_FWREADY:
738 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
739 		break;
740 
741 	case IOCPF_E_TIMEOUT:
742 		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_pf_failed(ioc);
744 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
745 		break;
746 
747 	case IOCPF_E_DISABLE:
748 		del_timer(&ioc->iocpf_timer);
749 		bfa_ioc_sync_leave(ioc);
750 		bfa_nw_ioc_hw_sem_release(ioc);
751 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
752 		break;
753 
754 	default:
755 		bfa_sm_fault(event);
756 	}
757 }
758 
759 static void
760 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
761 {
762 	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
763 		msecs_to_jiffies(BFA_IOC_TOV));
764 	/**
765 	 * Enable Interrupts before sending fw IOC ENABLE cmd.
766 	 */
767 	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
768 	bfa_ioc_send_enable(iocpf->ioc);
769 }
770 
771 /* Host IOC function is being enabled, awaiting response from firmware.
772  * Semaphore is acquired.
773  */
774 static void
775 bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
776 {
777 	struct bfa_ioc *ioc = iocpf->ioc;
778 
779 	switch (event) {
780 	case IOCPF_E_FWRSP_ENABLE:
781 		del_timer(&ioc->iocpf_timer);
782 		bfa_nw_ioc_hw_sem_release(ioc);
783 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
784 		break;
785 
786 	case IOCPF_E_INITFAIL:
787 		del_timer(&ioc->iocpf_timer);
788 		/*
789 		 * !!! fall through !!!
790 		 */
791 	case IOCPF_E_TIMEOUT:
792 		bfa_nw_ioc_hw_sem_release(ioc);
793 		if (event == IOCPF_E_TIMEOUT)
794 			bfa_ioc_pf_failed(ioc);
795 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
796 		break;
797 
798 	case IOCPF_E_DISABLE:
799 		del_timer(&ioc->iocpf_timer);
800 		bfa_nw_ioc_hw_sem_release(ioc);
801 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
802 		break;
803 
804 	default:
805 		bfa_sm_fault(event);
806 	}
807 }
808 
809 static void
810 bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
811 {
812 	bfa_ioc_pf_enabled(iocpf->ioc);
813 }
814 
815 static void
816 bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
817 {
818 	switch (event) {
819 	case IOCPF_E_DISABLE:
820 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
821 		break;
822 
823 	case IOCPF_E_GETATTRFAIL:
824 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
825 		break;
826 
827 	case IOCPF_E_FAIL:
828 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
829 		break;
830 
831 	default:
832 		bfa_sm_fault(event);
833 	}
834 }
835 
836 static void
837 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
838 {
839 	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
840 		msecs_to_jiffies(BFA_IOC_TOV));
841 	bfa_ioc_send_disable(iocpf->ioc);
842 }
843 
844 /* IOC is being disabled */
845 static void
846 bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
847 {
848 	struct bfa_ioc *ioc = iocpf->ioc;
849 
850 	switch (event) {
851 	case IOCPF_E_FWRSP_DISABLE:
852 		del_timer(&ioc->iocpf_timer);
853 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
854 		break;
855 
856 	case IOCPF_E_FAIL:
857 		del_timer(&ioc->iocpf_timer);
858 		/*
859 		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
863 		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
864 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
865 		break;
866 
867 	case IOCPF_E_FWRSP_ENABLE:
868 		break;
869 
870 	default:
871 		bfa_sm_fault(event);
872 	}
873 }
874 
875 static void
876 bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
877 {
878 	bfa_ioc_hw_sem_get(iocpf->ioc);
879 }
880 
/* Awaiting h/w semaphore to leave sync state before completing disable. */
882 static void
883 bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
884 {
885 	struct bfa_ioc *ioc = iocpf->ioc;
886 
887 	switch (event) {
888 	case IOCPF_E_SEMLOCKED:
889 		bfa_ioc_sync_leave(ioc);
890 		bfa_nw_ioc_hw_sem_release(ioc);
891 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
892 		break;
893 
894 	case IOCPF_E_SEM_ERROR:
895 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
896 		bfa_ioc_pf_hwfailed(ioc);
897 		break;
898 
899 	case IOCPF_E_FAIL:
900 		break;
901 
902 	default:
903 		bfa_sm_fault(event);
904 	}
905 }
906 
907 /* IOC disable completion entry. */
908 static void
909 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
910 {
911 	bfa_ioc_mbox_flush(iocpf->ioc);
912 	bfa_ioc_pf_disabled(iocpf->ioc);
913 }
914 
915 static void
916 bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
917 {
918 	struct bfa_ioc *ioc = iocpf->ioc;
919 
920 	switch (event) {
921 	case IOCPF_E_ENABLE:
922 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
923 		break;
924 
925 	case IOCPF_E_STOP:
926 		bfa_ioc_firmware_unlock(ioc);
927 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
928 		break;
929 
930 	default:
931 		bfa_sm_fault(event);
932 	}
933 }
934 
935 static void
936 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
937 {
938 	bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
939 	bfa_ioc_hw_sem_get(iocpf->ioc);
940 }
941 
942 /* Hardware initialization failed. */
943 static void
944 bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
945 {
946 	struct bfa_ioc *ioc = iocpf->ioc;
947 
948 	switch (event) {
949 	case IOCPF_E_SEMLOCKED:
950 		bfa_ioc_notify_fail(ioc);
951 		bfa_ioc_sync_leave(ioc);
952 		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
953 		bfa_nw_ioc_hw_sem_release(ioc);
954 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
955 		break;
956 
957 	case IOCPF_E_SEM_ERROR:
958 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
959 		bfa_ioc_pf_hwfailed(ioc);
960 		break;
961 
962 	case IOCPF_E_DISABLE:
963 		bfa_ioc_hw_sem_get_cancel(ioc);
964 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
965 		break;
966 
967 	case IOCPF_E_STOP:
968 		bfa_ioc_hw_sem_get_cancel(ioc);
969 		bfa_ioc_firmware_unlock(ioc);
970 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
971 		break;
972 
973 	case IOCPF_E_FAIL:
974 		break;
975 
976 	default:
977 		bfa_sm_fault(event);
978 	}
979 }
980 
981 static void
982 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
983 {
984 }
985 
986 /* Hardware initialization failed. */
987 static void
988 bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
989 {
990 	struct bfa_ioc *ioc = iocpf->ioc;
991 
992 	switch (event) {
993 	case IOCPF_E_DISABLE:
994 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
995 		break;
996 
997 	case IOCPF_E_STOP:
998 		bfa_ioc_firmware_unlock(ioc);
999 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1000 		break;
1001 
1002 	default:
1003 		bfa_sm_fault(event);
1004 	}
1005 }
1006 
1007 static void
1008 bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
1009 {
	/**
	 * Stop the firmware; the IOC is marked failed in hardware
	 * once the h/w semaphore is acquired.
	 */
1013 	bfa_ioc_lpu_stop(iocpf->ioc);
1014 
1015 	/**
1016 	 * Flush any queued up mailbox requests.
1017 	 */
1018 	bfa_ioc_mbox_flush(iocpf->ioc);
1019 	bfa_ioc_hw_sem_get(iocpf->ioc);
1020 }
1021 
1022 /* IOC is in failed state. */
1023 static void
1024 bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1025 {
1026 	struct bfa_ioc *ioc = iocpf->ioc;
1027 
1028 	switch (event) {
1029 	case IOCPF_E_SEMLOCKED:
1030 		bfa_ioc_sync_ack(ioc);
1031 		bfa_ioc_notify_fail(ioc);
1032 		if (!iocpf->auto_recover) {
1033 			bfa_ioc_sync_leave(ioc);
1034 			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1035 			bfa_nw_ioc_hw_sem_release(ioc);
1036 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1037 		} else {
1038 			if (bfa_ioc_sync_complete(ioc))
1039 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1040 			else {
1041 				bfa_nw_ioc_hw_sem_release(ioc);
1042 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1043 			}
1044 		}
1045 		break;
1046 
1047 	case IOCPF_E_SEM_ERROR:
1048 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1049 		bfa_ioc_pf_hwfailed(ioc);
1050 		break;
1051 
1052 	case IOCPF_E_DISABLE:
1053 		bfa_ioc_hw_sem_get_cancel(ioc);
1054 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1055 		break;
1056 
1057 	case IOCPF_E_FAIL:
1058 		break;
1059 
1060 	default:
1061 		bfa_sm_fault(event);
1062 	}
1063 }
1064 
1065 static void
1066 bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
1067 {
1068 }
1069 
1070 /* IOC is in failed state. */
1071 static void
1072 bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1073 {
1074 	switch (event) {
1075 	case IOCPF_E_DISABLE:
1076 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1077 		break;
1078 
1079 	default:
1080 		bfa_sm_fault(event);
1081 	}
1082 }
1083 
1084 /* BFA IOC private functions */
1085 
1086 /* Notify common modules registered for notification. */
1087 static void
1088 bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
1089 {
1090 	struct bfa_ioc_notify *notify;
	struct list_head *qe;
1092 
1093 	list_for_each(qe, &ioc->notify_q) {
1094 		notify = (struct bfa_ioc_notify *)qe;
1095 		notify->cbfn(notify->cbarg, event);
1096 	}
1097 }
1098 
1099 static void
1100 bfa_ioc_disable_comp(struct bfa_ioc *ioc)
1101 {
1102 	ioc->cbfn->disable_cbfn(ioc->bfa);
1103 	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
1104 }
1105 
1106 bool
1107 bfa_nw_ioc_sem_get(void __iomem *sem_reg)
1108 {
1109 	u32 r32;
1110 	int cnt = 0;
1111 #define BFA_SEM_SPINCNT	3000
1112 
1113 	r32 = readl(sem_reg);
1114 
1115 	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1116 		cnt++;
1117 		udelay(2);
1118 		r32 = readl(sem_reg);
1119 	}
1120 
	return !(r32 & 1);
1125 }
1126 
1127 void
1128 bfa_nw_ioc_sem_release(void __iomem *sem_reg)
1129 {
1130 	readl(sem_reg);
1131 	writel(1, sem_reg);
1132 }
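
/*
 * The two helpers above bracket access to resources shared across PCI
 * functions; bfa_ioc_pll_init() below uses them around chip init. A
 * minimal usage sketch:
 *
 *	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
 *		... access shared chip resources ...
 *		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
 *	}
 */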
1133 
/* Clear the firmware version header in SMEM. */
1135 static void
1136 bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
1137 {
1138 	u32 pgnum, pgoff, loff = 0;
1139 	int i;
1140 
1141 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1142 	pgoff = PSS_SMEM_PGOFF(loff);
1143 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1144 
1145 	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
1146 		writel(0, ioc->ioc_regs.smem_page_start + loff);
1147 		loff += sizeof(u32);
1148 	}
}

static void
1153 bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
1154 {
1155 	struct bfi_ioc_image_hdr fwhdr;
1156 	u32 fwstate, r32;
1157 
1158 	/* Spin on init semaphore to serialize. */
1159 	r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1160 	while (r32 & 0x1) {
1161 		udelay(20);
1162 		r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1163 	}
1164 
1165 	fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1166 	if (fwstate == BFI_IOC_UNINIT) {
1167 		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1168 		return;
1169 	}
1170 
1171 	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1172 
1173 	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
1174 		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1175 		return;
1176 	}
1177 
1178 	bfa_ioc_fwver_clear(ioc);
1179 	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
1180 	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
1181 
1182 	/*
1183 	 * Try to lock and then unlock the semaphore.
1184 	 */
1185 	readl(ioc->ioc_regs.ioc_sem_reg);
1186 	writel(1, ioc->ioc_regs.ioc_sem_reg);
1187 
1188 	/* Unlock init semaphore */
1189 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1190 }
1191 
1192 static void
1193 bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
1194 {
1195 	u32	r32;
1196 
1197 	/**
1198 	 * First read to the semaphore register will return 0, subsequent reads
1199 	 * will return 1. Semaphore is released by writing 1 to the register
1200 	 */
1201 	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1202 	if (r32 == ~0) {
1203 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1204 		return;
1205 	}
1206 	if (!(r32 & 1)) {
1207 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1208 		return;
1209 	}
1210 
1211 	mod_timer(&ioc->sem_timer, jiffies +
1212 		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
1213 }
1214 
1215 void
1216 bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
1217 {
1218 	writel(1, ioc->ioc_regs.ioc_sem_reg);
1219 }
1220 
1221 static void
1222 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
1223 {
1224 	del_timer(&ioc->sem_timer);
1225 }
1226 
1227 /* Initialize LPU local memory (aka secondary memory / SRAM) */
1228 static void
1229 bfa_ioc_lmem_init(struct bfa_ioc *ioc)
1230 {
1231 	u32	pss_ctl;
1232 	int		i;
1233 #define PSS_LMEM_INIT_TIME  10000
1234 
1235 	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1236 	pss_ctl &= ~__PSS_LMEM_RESET;
1237 	pss_ctl |= __PSS_LMEM_INIT_EN;
1238 
1239 	/*
	 * I2C workaround: 12.5 kHz clock
1241 	 */
1242 	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1243 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1244 
1245 	/**
1246 	 * wait for memory initialization to be complete
1247 	 */
1248 	i = 0;
1249 	do {
1250 		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1251 		i++;
1252 	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1253 
1254 	/**
1255 	 * If memory initialization is not successful, IOC timeout will catch
1256 	 * such failures.
1257 	 */
1258 	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1259 
1260 	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1261 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1262 }
1263 
1264 static void
1265 bfa_ioc_lpu_start(struct bfa_ioc *ioc)
1266 {
1267 	u32	pss_ctl;
1268 
1269 	/**
1270 	 * Take processor out of reset.
1271 	 */
1272 	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1273 	pss_ctl &= ~__PSS_LPU0_RESET;
1274 
1275 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1276 }
1277 
1278 static void
1279 bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
1280 {
1281 	u32	pss_ctl;
1282 
1283 	/**
1284 	 * Put processors in reset.
1285 	 */
1286 	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1287 	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1288 
1289 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1290 }
1291 
/* Read the firmware version header from SMEM. */
1293 void
1294 bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1295 {
1296 	u32	pgnum;
1297 	u32	loff = 0;
1298 	int		i;
1299 	u32	*fwsig = (u32 *) fwhdr;
1300 
1301 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1302 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1303 
1304 	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
1305 	     i++) {
1306 		fwsig[i] =
1307 			swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1308 		loff += sizeof(u32);
1309 	}
1310 }
1311 
/* Returns TRUE if the firmware and driver md5 checksums match. */
1313 bool
1314 bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1315 {
1316 	struct bfi_ioc_image_hdr *drv_fwhdr;
1317 	int i;
1318 
1319 	drv_fwhdr = (struct bfi_ioc_image_hdr *)
1320 		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1321 
1322 	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1323 		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
1324 			return false;
1325 	}
1326 
1327 	return true;
1328 }
1329 
1330 /* Return true if current running version is valid. Firmware signature and
1331  * execution context (driver/bios) must match.
1332  */
1333 static bool
1334 bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
1335 {
1336 	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
1337 
1338 	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1339 	drv_fwhdr = (struct bfi_ioc_image_hdr *)
1340 		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1341 
1342 	if (fwhdr.signature != drv_fwhdr->signature)
1343 		return false;
1344 
1345 	if (swab32(fwhdr.bootenv) != boot_env)
1346 		return false;
1347 
1348 	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
1349 }
1350 
1351 /* Conditionally flush any pending message from firmware at start. */
1352 static void
1353 bfa_ioc_msgflush(struct bfa_ioc *ioc)
1354 {
1355 	u32	r32;
1356 
1357 	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1358 	if (r32)
1359 		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1360 }
1361 
1362 static void
1363 bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1364 {
1365 	enum bfi_ioc_state ioc_fwstate;
1366 	bool fwvalid;
1367 	u32 boot_env;
1368 
1369 	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1370 
1371 	if (force)
1372 		ioc_fwstate = BFI_IOC_UNINIT;
1373 
1374 	boot_env = BFI_FWBOOT_ENV_OS;
1375 
1376 	/**
1377 	 * check if firmware is valid
1378 	 */
1379 	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1380 		false : bfa_ioc_fwver_valid(ioc, boot_env);
1381 
1382 	if (!fwvalid) {
1383 		bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
1384 		bfa_ioc_poll_fwinit(ioc);
1385 		return;
1386 	}
1387 
1388 	/**
1389 	 * If hardware initialization is in progress (initialized by other IOC),
1390 	 * just wait for an initialization completion interrupt.
1391 	 */
1392 	if (ioc_fwstate == BFI_IOC_INITING) {
1393 		bfa_ioc_poll_fwinit(ioc);
1394 		return;
1395 	}
1396 
1397 	/**
1398 	 * If IOC function is disabled and firmware version is same,
1399 	 * just re-enable IOC.
1400 	 */
1401 	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1402 		/**
1403 		 * When using MSI-X any pending firmware ready event should
1404 		 * be flushed. Otherwise MSI-X interrupts are not delivered.
1405 		 */
1406 		bfa_ioc_msgflush(ioc);
1407 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1408 		return;
1409 	}
1410 
1411 	/**
1412 	 * Initialize the h/w for any other states.
1413 	 */
1414 	bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
1415 	bfa_ioc_poll_fwinit(ioc);
1416 }
1417 
1418 void
1419 bfa_nw_ioc_timeout(void *ioc_arg)
1420 {
1421 	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
1422 
1423 	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1424 }
1425 
1426 static void
1427 bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
1428 {
1429 	u32 *msgp = (u32 *) ioc_msg;
1430 	u32 i;
1431 
1432 	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));
1433 
1434 	/*
1435 	 * first write msg to mailbox registers
1436 	 */
1437 	for (i = 0; i < len / sizeof(u32); i++)
1438 		writel(cpu_to_le32(msgp[i]),
1439 			      ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1440 
1441 	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1442 		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1443 
1444 	/*
1445 	 * write 1 to mailbox CMD to trigger LPU event
1446 	 */
1447 	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1448 	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1449 }
1450 
1451 static void
1452 bfa_ioc_send_enable(struct bfa_ioc *ioc)
1453 {
1454 	struct bfi_ioc_ctrl_req enable_req;
1455 	struct timeval tv;
1456 
1457 	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1458 		    bfa_ioc_portid(ioc));
1459 	enable_req.clscode = htons(ioc->clscode);
1460 	do_gettimeofday(&tv);
1461 	enable_req.tv_sec = ntohl(tv.tv_sec);
1462 	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1463 }
1464 
1465 static void
1466 bfa_ioc_send_disable(struct bfa_ioc *ioc)
1467 {
1468 	struct bfi_ioc_ctrl_req disable_req;
1469 
1470 	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1471 		    bfa_ioc_portid(ioc));
1472 	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1473 }
1474 
1475 static void
1476 bfa_ioc_send_getattr(struct bfa_ioc *ioc)
1477 {
1478 	struct bfi_ioc_getattr_req attr_req;
1479 
1480 	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1481 		    bfa_ioc_portid(ioc));
1482 	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1483 	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1484 }
1485 
1486 void
1487 bfa_nw_ioc_hb_check(void *cbarg)
1488 {
1489 	struct bfa_ioc *ioc = cbarg;
1490 	u32	hb_count;
1491 
	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	}
	ioc->hb_count = hb_count;
1499 
1500 	bfa_ioc_mbox_poll(ioc);
1501 	mod_timer(&ioc->hb_timer, jiffies +
1502 		msecs_to_jiffies(BFA_IOC_HB_TOV));
1503 }
1504 
1505 static void
1506 bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
1507 {
1508 	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1509 	mod_timer(&ioc->hb_timer, jiffies +
1510 		msecs_to_jiffies(BFA_IOC_HB_TOV));
1511 }
1512 
1513 static void
1514 bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1515 {
1516 	del_timer(&ioc->hb_timer);
1517 }
1518 
1519 /* Initiate a full firmware download. */
1520 static void
1521 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1522 		    u32 boot_env)
1523 {
1524 	u32 *fwimg;
1525 	u32 pgnum;
1526 	u32 loff = 0;
1527 	u32 chunkno = 0;
1528 	u32 i;
1529 	u32 asicmode;
1530 
1531 	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1532 
1533 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1534 
1535 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1536 
1537 	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
1538 		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1539 			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1540 			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1541 					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1542 		}
1543 
1544 		/**
1545 		 * write smem
1546 		 */
1547 		writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
1548 			      ((ioc->ioc_regs.smem_page_start) + (loff)));
1549 
1550 		loff += sizeof(u32);
1551 
1552 		/**
1553 		 * handle page offset wrap around
1554 		 */
1555 		loff = PSS_SMEM_PGOFF(loff);
1556 		if (loff == 0) {
1557 			pgnum++;
1558 			writel(pgnum,
1559 				      ioc->ioc_regs.host_page_num_fn);
1560 		}
1561 	}
1562 
1563 	writel(bfa_ioc_smem_pgnum(ioc, 0),
1564 		      ioc->ioc_regs.host_page_num_fn);
1565 
1566 	/*
1567 	 * Set boot type, env and device mode at the end.
	 */
1569 	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1570 					ioc->port0_mode, ioc->port1_mode);
1571 	writel(asicmode, ((ioc->ioc_regs.smem_page_start)
1572 			+ BFI_FWBOOT_DEVMODE_OFF));
1573 	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
1574 			+ (BFI_FWBOOT_TYPE_OFF)));
1575 	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
1576 			+ (BFI_FWBOOT_ENV_OFF)));
1577 }
1578 
1579 static void
1580 bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
1581 {
1582 	bfa_ioc_hwinit(ioc, force);
1583 }
1584 
1585 /* BFA ioc enable reply by firmware */
1586 static void
1587 bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
1588 			u8 cap_bm)
1589 {
1590 	struct bfa_iocpf *iocpf = &ioc->iocpf;
1591 
1592 	ioc->port_mode = ioc->port_mode_cfg = port_mode;
1593 	ioc->ad_cap_bm = cap_bm;
1594 	bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1595 }
1596 
1597 /* Update BFA configuration from firmware configuration. */
1598 static void
1599 bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1600 {
1601 	struct bfi_ioc_attr *attr = ioc->attr;
1602 
1603 	attr->adapter_prop  = ntohl(attr->adapter_prop);
1604 	attr->card_type     = ntohl(attr->card_type);
1605 	attr->maxfrsize	    = ntohs(attr->maxfrsize);
1606 
1607 	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1608 }
1609 
1610 /* Attach time initialization of mbox logic. */
1611 static void
1612 bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1613 {
1614 	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1615 	int	mc;
1616 
1617 	INIT_LIST_HEAD(&mod->cmd_q);
1618 	for (mc = 0; mc < BFI_MC_MAX; mc++) {
1619 		mod->mbhdlr[mc].cbfn = NULL;
1620 		mod->mbhdlr[mc].cbarg = ioc->bfa;
1621 	}
1622 }
1623 
1624 /* Mbox poll timer -- restarts any pending mailbox requests. */
1625 static void
1626 bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1627 {
1628 	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1629 	struct bfa_mbox_cmd *cmd;
1630 	bfa_mbox_cmd_cbfn_t cbfn;
1631 	void *cbarg;
1632 	u32 stat;
1633 
1634 	/**
1635 	 * If no command pending, do nothing
1636 	 */
1637 	if (list_empty(&mod->cmd_q))
1638 		return;
1639 
1640 	/**
1641 	 * If previous command is not yet fetched by firmware, do nothing
1642 	 */
1643 	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1644 	if (stat)
1645 		return;
1646 
1647 	/**
1648 	 * Enqueue command to firmware.
1649 	 */
1650 	bfa_q_deq(&mod->cmd_q, &cmd);
1651 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1652 
1653 	/**
1654 	 * Give a callback to the client, indicating that the command is sent
1655 	 */
1656 	if (cmd->cbfn) {
1657 		cbfn = cmd->cbfn;
1658 		cbarg = cmd->cbarg;
1659 		cmd->cbfn = NULL;
1660 		cbfn(cbarg);
1661 	}
1662 }
1663 
1664 /* Cleanup any pending requests. */
1665 static void
1666 bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
1667 {
1668 	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1669 	struct bfa_mbox_cmd *cmd;
1670 
1671 	while (!list_empty(&mod->cmd_q))
1672 		bfa_q_deq(&mod->cmd_q, &cmd);
1673 }
1674 
/**
 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
 *
 * @ioc:     IOC instance
 * @tbuf:    app memory to store data from smem
 * @soff:    smem offset
 * @sz:      size of smem in bytes
 */
1683 static int
1684 bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
1685 {
1686 	u32 pgnum, loff, r32;
1687 	int i, len;
1688 	u32 *buf = tbuf;
1689 
1690 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1691 	loff = PSS_SMEM_PGOFF(soff);
1692 
	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
1696 	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
1697 		return 1;
1698 
1699 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1700 
1701 	len = sz/sizeof(u32);
1702 	for (i = 0; i < len; i++) {
1703 		r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1704 		buf[i] = be32_to_cpu(r32);
1705 		loff += sizeof(u32);
1706 
1707 		/**
1708 		 * handle page offset wrap around
1709 		 */
1710 		loff = PSS_SMEM_PGOFF(loff);
1711 		if (loff == 0) {
1712 			pgnum++;
1713 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1714 		}
1715 	}
1716 
1717 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1718 	       ioc->ioc_regs.host_page_num_fn);
1719 
1720 	/*
1721 	 * release semaphore
1722 	 */
1723 	readl(ioc->ioc_regs.ioc_init_sem_reg);
1724 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1725 	return 0;
1726 }
1727 
/* Read firmware trace from SMEM. */
1729 int
1730 bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1731 {
1732 	u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
1733 	int tlen, status = 0;
1734 
1735 	tlen = *trclen;
1736 	if (tlen > BNA_DBG_FWTRC_LEN)
1737 		tlen = BNA_DBG_FWTRC_LEN;
1738 
1739 	status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
1740 	*trclen = tlen;
1741 	return status;
1742 }
1743 
1744 /* Save firmware trace if configured. */
1745 static void
1746 bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
1747 {
1748 	int tlen;
1749 
1750 	if (ioc->dbg_fwsave_once) {
1751 		ioc->dbg_fwsave_once = 0;
1752 		if (ioc->dbg_fwsave_len) {
1753 			tlen = ioc->dbg_fwsave_len;
1754 			bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
1755 		}
1756 	}
1757 }
1758 
1759 /* Retrieve saved firmware trace from a prior IOC failure. */
1760 int
1761 bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1762 {
1763 	int tlen;
1764 
1765 	if (ioc->dbg_fwsave_len == 0)
1766 		return BFA_STATUS_ENOFSAVE;
1767 
1768 	tlen = *trclen;
1769 	if (tlen > ioc->dbg_fwsave_len)
1770 		tlen = ioc->dbg_fwsave_len;
1771 
1772 	memcpy(trcdata, ioc->dbg_fwsave, tlen);
1773 	*trclen = tlen;
1774 	return BFA_STATUS_OK;
1775 }
1776 
1777 static void
1778 bfa_ioc_fail_notify(struct bfa_ioc *ioc)
1779 {
1780 	/**
1781 	 * Notify driver and common modules registered for notification.
1782 	 */
1783 	ioc->cbfn->hbfail_cbfn(ioc->bfa);
1784 	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
1785 	bfa_nw_ioc_debug_save_ftrc(ioc);
1786 }
1787 
1788 /* IOCPF to IOC interface */
1789 static void
1790 bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
1791 {
1792 	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
1793 }
1794 
1795 static void
1796 bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
1797 {
1798 	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
1799 }
1800 
1801 static void
1802 bfa_ioc_pf_failed(struct bfa_ioc *ioc)
1803 {
1804 	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
1805 }
1806 
1807 static void
1808 bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
1809 {
1810 	bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1811 }
1812 
1813 static void
1814 bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
1815 {
1816 	/**
1817 	 * Provide enable completion callback and AEN notification.
1818 	 */
1819 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1820 }
1821 
1822 /* IOC public */
1823 static enum bfa_status
1824 bfa_ioc_pll_init(struct bfa_ioc *ioc)
1825 {
1826 	/*
1827 	 *  Hold semaphore so that nobody can access the chip during init.
1828 	 */
1829 	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1830 
1831 	bfa_ioc_pll_init_asic(ioc);
1832 
1833 	ioc->pllinit = true;
1834 
1835 	/* Initialize LMEM */
1836 	bfa_ioc_lmem_init(ioc);
1837 
1838 	/*
1839 	 *  release semaphore.
1840 	 */
1841 	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1842 
1843 	return BFA_STATUS_OK;
1844 }
1845 
/* Initiate a firmware boot. The diag module uses this interface to boot
 * with memory test as the entry vector.
 */
1849 static void
1850 bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
1851 		u32 boot_env)
1852 {
1853 	bfa_ioc_stats(ioc, ioc_boots);
1854 
1855 	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1856 		return;
1857 
1858 	/**
1859 	 * Initialize IOC state of all functions on a chip reset.
1860 	 */
1861 	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
1862 		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
1863 		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
1864 	} else {
1865 		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
1866 		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
1867 	}
1868 
1869 	bfa_ioc_msgflush(ioc);
1870 	bfa_ioc_download_fw(ioc, boot_type, boot_env);
1871 	bfa_ioc_lpu_start(ioc);
1872 }
1873 
1874 /* Enable/disable IOC failure auto recovery. */
1875 void
1876 bfa_nw_ioc_auto_recover(bool auto_recover)
1877 {
1878 	bfa_nw_auto_recover = auto_recover;
1879 }
1880 
1881 static bool
1882 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
1883 {
1884 	u32	*msgp = mbmsg;
1885 	u32	r32;
1886 	int		i;
1887 
1888 	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1889 	if ((r32 & 1) == 0)
1890 		return false;
1891 
1892 	/**
1893 	 * read the MBOX msg
1894 	 */
1895 	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
1896 	     i++) {
1897 		r32 = readl(ioc->ioc_regs.lpu_mbox +
1898 				   i * sizeof(u32));
1899 		msgp[i] = htonl(r32);
1900 	}
1901 
1902 	/**
1903 	 * turn off mailbox interrupt by clearing mailbox status
1904 	 */
1905 	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1906 	readl(ioc->ioc_regs.lpu_mbox_cmd);
1907 
1908 	return true;
1909 }
1910 
1911 static void
1912 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
1913 {
1914 	union bfi_ioc_i2h_msg_u	*msg;
1915 	struct bfa_iocpf *iocpf = &ioc->iocpf;
1916 
1917 	msg = (union bfi_ioc_i2h_msg_u *) m;
1918 
1919 	bfa_ioc_stats(ioc, ioc_isrs);
1920 
1921 	switch (msg->mh.msg_id) {
1922 	case BFI_IOC_I2H_HBEAT:
1923 		break;
1924 
1925 	case BFI_IOC_I2H_ENABLE_REPLY:
1926 		bfa_ioc_enable_reply(ioc,
1927 			(enum bfa_mode)msg->fw_event.port_mode,
1928 			msg->fw_event.cap_bm);
1929 		break;
1930 
1931 	case BFI_IOC_I2H_DISABLE_REPLY:
1932 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
1933 		break;
1934 
1935 	case BFI_IOC_I2H_GETATTR_REPLY:
1936 		bfa_ioc_getattr_reply(ioc);
1937 		break;
1938 
1939 	default:
1940 		BUG_ON(1);
1941 	}
1942 }
1943 
/**
 * bfa_nw_ioc_attach - IOC attach time initialization and setup.
 *
 * @ioc:	memory for IOC
 * @bfa:	driver instance structure
 * @cbfn:	IOC callback functions
 */
1950 void
1951 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
1952 {
1953 	ioc->bfa	= bfa;
1954 	ioc->cbfn	= cbfn;
1955 	ioc->fcmode	= false;
1956 	ioc->pllinit	= false;
1957 	ioc->dbg_fwsave_once = true;
1958 	ioc->iocpf.ioc  = ioc;
1959 
1960 	bfa_ioc_mbox_attach(ioc);
1961 	INIT_LIST_HEAD(&ioc->notify_q);
1962 
1963 	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
1964 	bfa_fsm_send_event(ioc, IOC_E_RESET);
1965 }
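
/*
 * Typical bring-up order, shown as a sketch (the caller performs
 * essentially these steps at init time; error handling is omitted and
 * the argument names are illustrative):
 *
 *	bfa_nw_ioc_attach(ioc, bna, &ioc_cbfn);
 *	bfa_nw_ioc_pci_init(ioc, &pcidev_info, clscode);
 *	bfa_nw_ioc_mem_claim(ioc, dm_kva, dm_pa);
 *	bfa_nw_ioc_enable(ioc);
 */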
1966 
1967 /* Driver detach time IOC cleanup. */
1968 void
1969 bfa_nw_ioc_detach(struct bfa_ioc *ioc)
1970 {
1971 	bfa_fsm_send_event(ioc, IOC_E_DETACH);
1972 
1973 	/* Done with detach, empty the notify_q. */
1974 	INIT_LIST_HEAD(&ioc->notify_q);
1975 }
1976 
/**
 * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
 *
 * @ioc:	IOC instance
 * @pcidev:	PCI device information for this IOC
 * @clscode:	PCI function class code
 */
1982 void
1983 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
1984 		 enum bfi_pcifn_class clscode)
1985 {
1986 	ioc->clscode	= clscode;
1987 	ioc->pcidev	= *pcidev;
1988 
1989 	/**
1990 	 * Initialize IOC and device personality
1991 	 */
1992 	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
1993 	ioc->asic_mode  = BFI_ASIC_MODE_FC;
1994 
1995 	switch (pcidev->device_id) {
1996 	case PCI_DEVICE_ID_BROCADE_CT:
1997 		ioc->asic_gen = BFI_ASIC_GEN_CT;
1998 		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
1999 		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2000 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2001 		ioc->ad_cap_bm = BFA_CM_CNA;
2002 		break;
2003 
2004 	case BFA_PCI_DEVICE_ID_CT2:
2005 		ioc->asic_gen = BFI_ASIC_GEN_CT2;
2006 		if (clscode == BFI_PCIFN_CLASS_FC &&
2007 			pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2008 			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2009 			ioc->fcmode = true;
2010 			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2011 			ioc->ad_cap_bm = BFA_CM_HBA;
2012 		} else {
2013 			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2014 			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2015 			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2016 				ioc->port_mode =
2017 				ioc->port_mode_cfg = BFA_MODE_CNA;
2018 				ioc->ad_cap_bm = BFA_CM_CNA;
2019 			} else {
2020 				ioc->port_mode =
2021 				ioc->port_mode_cfg = BFA_MODE_NIC;
2022 				ioc->ad_cap_bm = BFA_CM_NIC;
2023 			}
2024 		}
2025 		break;
2026 
2027 	default:
2028 		BUG_ON(1);
2029 	}
2030 
2031 	/**
2032 	 * Set asic specific interfaces.
2033 	 */
2034 	if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2035 		bfa_nw_ioc_set_ct_hwif(ioc);
2036 	else {
2037 		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2038 		bfa_nw_ioc_set_ct2_hwif(ioc);
2039 		bfa_nw_ioc_ct2_poweron(ioc);
2040 	}
2041 
2042 	bfa_ioc_map_port(ioc);
2043 	bfa_ioc_reg_init(ioc);
2044 }

/**
 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
 *
 * @ioc:	memory for IOC
 * @dm_kva:	kernel virtual address of IOC dma memory
 * @dm_pa:	physical address of IOC dma memory
 */
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
{
	/* dma memory for firmware attribute */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}

/* Return size of dma memory required. */
u32
bfa_nw_ioc_meminfo(void)
{
	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
}

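/*
 * Illustrative usage of the two routines above (a sketch, not code from
 * this driver): the enclosing driver allocates a coherent DMA region of
 * bfa_nw_ioc_meminfo() bytes and hands it to bfa_nw_ioc_mem_claim().
 * "pcidev" stands for whatever struct pci_dev the caller holds:
 *
 *	dma_addr_t pa;
 *	u32 sz = bfa_nw_ioc_meminfo();
 *	u8 *kva = dma_alloc_coherent(&pcidev->dev, sz, &pa, GFP_KERNEL);
 *
 *	if (kva)
 *		bfa_nw_ioc_mem_claim(ioc, kva, pa);
 */
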
void
bfa_nw_ioc_enable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = true;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

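/*
 * How the pieces above fit together, as a hedged sketch (the actual
 * sequence lives in the caller, e.g. the bnad code, not in this file):
 *
 *	bfa_nw_ioc_attach(ioc, bfa, cbfn);
 *	bfa_nw_ioc_pci_init(ioc, pcidev, clscode);
 *	bfa_nw_ioc_mem_claim(ioc, dm_kva, dm_pa);
 *	bfa_nw_ioc_enable(ioc);
 *
 * Enable completion is reported asynchronously through the callbacks
 * registered at attach time, once the IOC state machine reaches the
 * operational state.
 */
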
void
bfa_nw_ioc_disable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

/* Initialize memory for saving firmware trace. */
void
bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
}

static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

/* Register mailbox message handler function, to be called by common modules */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn	= cbfn;
	mod->mbhdlr[mc].cbarg	= cbarg;
}

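/*
 * A minimal registration sketch; my_msghandler and my_mod are
 * illustrative names, not symbols from this driver:
 *
 *	static void my_msghandler(void *cbarg, struct bfi_mbmsg *m)
 *	{
 *		struct my_mod *mod = cbarg;
 *		... decode *m ...
 *	}
 *
 *	bfa_nw_ioc_mbox_regisr(ioc, BFI_MC_FLASH, my_msghandler, my_mod);
 *
 * bfa_nw_ioc_mbox_isr() below dispatches on message class, so there is
 * exactly one handler slot per class.
 */
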
/**
 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
 *
 * @ioc:	IOC instance
 * @cmd:	Mailbox command
 * @cbfn:	callback function
 * @cbarg:	callback argument
 *
 * If the mailbox is busy, the command is queued and sent once the mailbox
 * frees up. It is the caller's responsibility to serialize requests.
 */
bool
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
			bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	u32			stat;

	cmd->cbfn = cbfn;
	cmd->cbarg = cbarg;

	/* If a previous command is pending, queue new command */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return true;
	}

	/* If the mailbox is busy, queue command for the poll timer */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return true;
	}

	/* Mailbox is free -- send command to firmware */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	return false;
}

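/*
 * Callers build the request in cmd->msg before queueing. A hedged
 * sketch, mirroring what bfa_nw_flash_get_attr() does further below:
 *
 *	struct bfi_flash_query_req *msg =
 *			(struct bfi_flash_query_req *) cmd->msg;
 *
 *	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
 *		    bfa_ioc_portid(ioc));
 *	bfa_nw_ioc_mbox_queue(ioc, cmd, NULL, NULL);
 *
 * A false return means the command was written to the mailbox right
 * away; true means it was queued and will go out from the poll path.
 */
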
/* Handle mailbox interrupts */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfi_mbmsg m;
	int				mc;

	if (bfa_ioc_msgget(ioc, &m)) {
		/* Treat IOC message class as special. */
		mc = m.mh.msg_class;
		if (mc == BFI_MC_IOC) {
			bfa_ioc_isr(ioc, &m);
			return;
		}

		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
			return;

		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
	}

	bfa_ioc_lpu_read_stat(ioc);

	/* Try to send pending mailbox commands */
	bfa_ioc_mbox_poll(ioc);
}

void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

/* Return true if IOC is disabled. */
bool
bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/* Return true if IOC is operational. */
bool
bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

/* Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 */
void
bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
			struct bfa_ioc_notify *notify)
{
	list_add_tail(&notify->qe, &ioc->notify_q);
}

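/*
 * Registration sketch (my_notify_cb and mymod are illustrative names);
 * bfa_nw_flash_attach() below follows essentially this pattern:
 *
 *	bfa_ioc_notify_init(&mymod->ioc_notify, my_notify_cb, mymod);
 *	bfa_nw_ioc_notify_register(ioc, &mymod->ioc_notify);
 *
 * The callback then receives enum bfa_ioc_event values such as
 * BFA_IOC_E_DISABLED or BFA_IOC_E_FAILED.
 */
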
#define BFA_MFG_NAME "Brocade"
static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
			 struct bfa_adapter_attr *ad_attr)
{
	struct bfi_ioc_attr *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd, sizeof(struct bfa_mfg_vpd));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac  = bfa_nw_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
}

static enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
		return BFA_IOC_TYPE_LL;

	BUG_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);

	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}

static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
	memcpy(serial_num, (void *)ioc->attr->brcd_serialnum,
	       BFA_ADAPTER_SERIAL_NUM_LEN);
}

static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

static void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
{
	BUG_ON(!chip_rev);

	memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memcpy(optrom_ver, ioc->attr->optrom_version, BFA_VERSION_LEN);
}

static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

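/* Compose the adapter model string from the manufacturer name and card
 * type; e.g. a card_type of 1020 would yield "Brocade-1020" (value
 * illustrative only).
 */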
static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	BUG_ON(!model);
	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		BFA_MFG_NAME, ioc_attr->card_type);
}

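/* Report the externally visible IOC state; for the enabling and failure
 * states, refine it with the finer-grained IOCPF state.
 */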
static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}
	return ioc_st;
}

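/* Fill ioc_attr with a snapshot of the IOC, adapter and PCI attributes. */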
void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;
	ioc_attr->port_mode = ioc->port_mode;

	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
	ioc_attr->cap_bm = ioc->ad_cap_bm;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

/* WWN public */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}

mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
{
	return ioc->attr->mac;
}

/* Firmware failure detected. Start recovery actions. */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	pr_crit("Heart Beat of IOC has failed\n");
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

/* BFA IOC PF private functions */

static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}

static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}

static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}

static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}

static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}

void
bfa_nw_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
	enum bfa_iocpf_state iocpf_st;

	iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

	if (iocpf_st == BFA_IOCPF_HWINIT)
		bfa_ioc_poll_fwinit(ioc);
	else
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

void
bfa_nw_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
		bfa_nw_iocpf_timeout(ioc);
	} else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		mod_timer(&ioc->iocpf_timer, jiffies +
			msecs_to_jiffies(BFA_IOC_POLL_TOV));
	}
}

/*
 *	Flash module specific
 */

/*
 * The flash DMA buffer should be big enough to hold both the MFG block
 * and the ASIC block (64K) at the same time, and should be 2K aligned
 * so that a write segment does not cross a sector boundary.
 */
#define BFA_FLASH_SEG_SZ	2048
#define BFA_FLASH_DMA_BUF_SZ	\
	roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
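
/*
 * As a rough worked example (the 1K figure is illustrative, not the real
 * sizeof(struct bfa_mfg_block)): a 1K MFG block would give
 * roundup(64K + 1K, 2K) = 66K, i.e. 33 flash segments.
 */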

static void
bfa_flash_cb(struct bfa_flash *flash)
{
	flash->op_busy = 0;
	if (flash->cbfn)
		flash->cbfn(flash->cbarg, flash->status);
}

static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_flash *flash = cbarg;

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (flash->op_busy) {
			flash->status = BFA_STATUS_IOC_FAILURE;
			flash->cbfn(flash->cbarg, flash->status);
			flash->op_busy = 0;
		}
		break;
	default:
		break;
	}
}

/* Send flash write request. */
static void
bfa_flash_write_send(struct bfa_flash *flash)
{
	struct bfi_flash_write_req *msg =
			(struct bfi_flash_write_req *) flash->mb.msg;
	u32	len;

	msg->type = cpu_to_be32(flash->type);
	msg->instance = flash->instance;
	msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
	       flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = cpu_to_be32(len);

	/* indicate if it's the last msg of the whole write operation */
	msg->last = (len == flash->residue) ? 1 : 0;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);

	flash->residue -= len;
	flash->offset += len;
}

/**
 * bfa_flash_read_send - Send flash read request.
 *
 * @cbarg: flash structure pointer
 */
static void
bfa_flash_read_send(void *cbarg)
{
	struct bfa_flash *flash = cbarg;
	struct bfi_flash_read_req *msg =
			(struct bfi_flash_read_req *) flash->mb.msg;
	u32	len;

	msg->type = cpu_to_be32(flash->type);
	msg->instance = flash->instance;
	msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
	       flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = cpu_to_be32(len);
	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
}

/**
 * bfa_flash_intr - Process flash response messages upon receiving interrupts.
 *
 * @flasharg: flash structure
 * @msg: message structure
 */
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
{
	struct bfa_flash *flash = flasharg;
	u32	status;

	union {
		struct bfi_flash_query_rsp *query;
		struct bfi_flash_write_rsp *write;
		struct bfi_flash_read_rsp *read;
		struct bfi_mbmsg   *msg;
	} m;

	m.msg = msg;

	/* receiving response after ioc failure */
	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
		return;

	switch (msg->mh.msg_id) {
	case BFI_FLASH_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		if (status == BFA_STATUS_OK) {
			u32	i;
			struct bfa_flash_attr *attr, *f;

			attr = (struct bfa_flash_attr *) flash->ubuf;
			f = (struct bfa_flash_attr *) flash->dbuf_kva;
			attr->status = be32_to_cpu(f->status);
			attr->npart = be32_to_cpu(f->npart);
			for (i = 0; i < attr->npart; i++) {
				attr->part[i].part_type =
					be32_to_cpu(f->part[i].part_type);
				attr->part[i].part_instance =
					be32_to_cpu(f->part[i].part_instance);
				attr->part[i].part_off =
					be32_to_cpu(f->part[i].part_off);
				attr->part[i].part_size =
					be32_to_cpu(f->part[i].part_size);
				attr->part[i].part_len =
					be32_to_cpu(f->part[i].part_len);
				attr->part[i].part_status =
					be32_to_cpu(f->part[i].part_status);
			}
		}
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		if (status != BFA_STATUS_OK || flash->residue == 0) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			bfa_flash_write_send(flash);
		}
		break;
	case BFI_FLASH_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		if (status != BFA_STATUS_OK) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			u32 len = be32_to_cpu(m.read->length);

			memcpy(flash->ubuf + flash->offset,
			       flash->dbuf_kva, len);
			flash->residue -= len;
			flash->offset += len;
			if (flash->residue == 0) {
				flash->status = status;
				bfa_flash_cb(flash);
			} else {
				bfa_flash_read_send(flash);
			}
		}
		break;
	case BFI_FLASH_I2H_BOOT_VER_RSP:
	case BFI_FLASH_I2H_EVENT:
		break;
	default:
		WARN_ON(1);
	}
}

/*
 * Flash memory info API.
 */
u32
bfa_nw_flash_meminfo(void)
{
	return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

/**
 * bfa_nw_flash_attach - Flash attach API.
 *
 * @flash: flash structure
 * @ioc: ioc structure
 * @dev: device structure
 */
void
bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
{
	flash->ioc = ioc;
	flash->cbfn = NULL;
	flash->cbarg = NULL;
	flash->op_busy = 0;

	bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
	bfa_q_qe_init(&flash->ioc_notify);
	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
}

/**
 * bfa_nw_flash_memclaim - Claim memory for flash
 *
 * @flash: flash structure
 * @dm_kva: pointer to virtual memory address
 * @dm_pa: physical memory address
 */
void
bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
{
	flash->dbuf_kva = dm_kva;
	flash->dbuf_pa = dm_pa;
	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
	dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
	dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

/**
 * bfa_nw_flash_get_attr - Get flash attribute.
 *
 * @flash: flash structure
 * @attr: flash attribute structure
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
		      bfa_cb_flash cbfn, void *cbarg)
{
	struct bfi_flash_query_req *msg =
			(struct bfi_flash_query_req *) flash->mb.msg;

	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->ubuf = (u8 *) attr;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);

	return BFA_STATUS_OK;
}

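/*
 * Illustrative call sequence for the query above; my_flash_cb and my_dev
 * are hypothetical caller names:
 *
 *	static void my_flash_cb(void *cbarg, enum bfa_status status)
 *	{
 *		... attr is now valid if status == BFA_STATUS_OK ...
 *	}
 *
 *	status = bfa_nw_flash_get_attr(flash, attr, my_flash_cb, my_dev);
 *
 * A BFA_STATUS_OK return only means the query was issued; the attribute
 * buffer is valid once my_flash_cb() runs with a good status.
 */
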
/**
 * bfa_nw_flash_update_part - Update flash partition.
 *
 * @flash: flash structure
 * @type: flash partition type
 * @instance: flash partition instance
 * @buf: update data buffer
 * @len: data buffer length
 * @offset: offset relative to the partition starting address
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
			 void *buf, u32 len, u32 offset,
			 bfa_cb_flash cbfn, void *cbarg)
{
	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* 'len' must be non-zero and on a word (4-byte) boundary */
	if (!len || (len & 0x03))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (type == BFA_FLASH_PART_MFG)
		return BFA_STATUS_EINVAL;

	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_write_send(flash);

	return BFA_STATUS_OK;
}

/**
 * bfa_nw_flash_read_part - Read flash partition.
 *
 * @flash: flash structure
 * @type: flash partition type
 * @instance: flash partition instance
 * @buf: read data buffer
 * @len: data buffer length
 * @offset: offset relative to the partition starting address
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
		       void *buf, u32 len, u32 offset,
		       bfa_cb_flash cbfn, void *cbarg)
{
	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* 'len' must be non-zero and on a word (4-byte) boundary */
	if (!len || (len & 0x03))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_read_send(flash);

	return BFA_STATUS_OK;
}

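/*
 * A hedged usage sketch for the read path: fetching the first 4K of a
 * partition into a caller buffer (partition type and callback names are
 * illustrative):
 *
 *	status = bfa_nw_flash_read_part(flash, BFA_FLASH_PART_MFG, 0,
 *					buf, 4096, 0, my_flash_cb, my_dev);
 *
 * Reads larger than BFA_FLASH_DMA_BUF_SZ are split automatically by
 * bfa_flash_read_send()/bfa_flash_intr(), one DMA-buffer-sized chunk per
 * mailbox round trip.
 */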