xref: /openbmc/linux/drivers/scsi/bfa/bfa_ioc.c (revision baa7eb025ab14f3cba2e35c0a8648f9c9f01d24f)
1 /*
2  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3  * All rights reserved
4  * www.brocade.com
5  *
6  * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License (GPL) Version 2 as
10  * published by the Free Software Foundation
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  */
17 
18 #include "bfa_ioc.h"
19 #include "bfi_ctreg.h"
20 #include "bfa_defs.h"
21 #include "bfa_defs_svc.h"
22 #include "bfad_drv.h"
23 
24 BFA_TRC_FILE(CNA, IOC);
25 
26 /*
27  * IOC local definitions
28  */
29 #define BFA_IOC_TOV		3000	/* msecs */
30 #define BFA_IOC_HWSEM_TOV	500	/* msecs */
31 #define BFA_IOC_HB_TOV		500	/* msecs */
32 #define BFA_IOC_HWINIT_MAX	2
33 #define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
34 
35 #define bfa_ioc_timer_start(__ioc)					\
36 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
37 			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
38 #define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)
39 
40 #define bfa_hb_timer_start(__ioc)					\
41 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
42 			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
43 #define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)
44 
45 #define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
46 #define BFA_DBG_FWTRC_LEN					\
47 	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
48 	 (sizeof(struct bfa_trc_mod_s) -			\
49 	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
50 #define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
51 
52 /*
53  * ASIC specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
54  */
55 
56 #define bfa_ioc_firmware_lock(__ioc)			\
57 			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
58 #define bfa_ioc_firmware_unlock(__ioc)			\
59 			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
60 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
61 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
62 #define bfa_ioc_notify_hbfail(__ioc)			\
63 			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
64 
65 #ifdef BFA_IOC_IS_UEFI
66 #define bfa_ioc_is_bios_optrom(__ioc) (0)
67 #define bfa_ioc_is_uefi(__ioc) BFA_IOC_IS_UEFI
68 #else
69 #define bfa_ioc_is_bios_optrom(__ioc)	\
70 	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
71 #define bfa_ioc_is_uefi(__ioc) (0)
72 #endif
73 
74 #define bfa_ioc_mbox_cmd_pending(__ioc)		\
75 			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
76 			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
77 
78 bfa_boolean_t bfa_auto_recover = BFA_TRUE;
79 
80 /*
81  * forward declarations
82  */
83 static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
84 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
85 static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
86 static void bfa_ioc_timeout(void *ioc);
87 static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
88 static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
89 static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
90 static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
91 static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
92 static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
93 static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
94 static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
95 static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
96 static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
97 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
98 static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
99 static void bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc);
100 static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc);
101 static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
102 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
103 
104 /*
105  *  hal_ioc_sm
106  */
107 
108 /*
109  * IOC state machine definitions/declarations
110  */
111 enum ioc_event {
112 	IOC_E_RESET		= 1,	/*  IOC reset request		*/
113 	IOC_E_ENABLE		= 2,	/*  IOC enable request		*/
114 	IOC_E_DISABLE		= 3,	/*  IOC disable request	*/
115 	IOC_E_DETACH		= 4,	/*  driver detach cleanup	*/
116 	IOC_E_ENABLED		= 5,	/*  f/w enabled		*/
117 	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
118 	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
119 	IOC_E_FAILED		= 8,	/*  failure notice by iocpf sm	*/
120 	IOC_E_HBFAIL		= 9,	/*  heartbeat failure		*/
121 	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
122 	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
123 };
124 
125 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
126 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
127 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
128 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
129 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
130 bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
131 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
132 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
133 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
134 
135 static struct bfa_sm_table_s ioc_sm_table[] = {
136 	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
137 	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
138 	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
139 	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
140 	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
141 	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
142 	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
143 	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
144 	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
145 };
146 
147 /*
148  * IOCPF state machine definitions/declarations
149  */
150 
151 #define bfa_iocpf_timer_start(__ioc)					\
152 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
153 			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
154 #define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)
155 
156 #define bfa_iocpf_recovery_timer_start(__ioc)				\
157 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
158 			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)
159 
160 #define bfa_sem_timer_start(__ioc)					\
161 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
162 			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
163 #define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
164 
165 /*
166  * Forward declarations for iocpf state machine
167  */
168 static void bfa_iocpf_enable(struct bfa_ioc_s *ioc);
169 static void bfa_iocpf_disable(struct bfa_ioc_s *ioc);
170 static void bfa_iocpf_fail(struct bfa_ioc_s *ioc);
171 static void bfa_iocpf_initfail(struct bfa_ioc_s *ioc);
172 static void bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc);
173 static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
174 static void bfa_iocpf_timeout(void *ioc_arg);
175 static void bfa_iocpf_sem_timeout(void *ioc_arg);
176 
177 /*
178  * IOCPF state machine events
179  */
180 enum iocpf_event {
181 	IOCPF_E_ENABLE		= 1,	/*  IOCPF enable request	*/
182 	IOCPF_E_DISABLE		= 2,	/*  IOCPF disable request	*/
183 	IOCPF_E_STOP		= 3,	/*  stop on driver detach	*/
184 	IOCPF_E_FWREADY		= 4,	/*  f/w initialization done	*/
185 	IOCPF_E_FWRSP_ENABLE	= 5,	/*  enable f/w response	*/
186 	IOCPF_E_FWRSP_DISABLE	= 6,	/*  disable f/w response	*/
187 	IOCPF_E_FAIL		= 7,	/*  failure notice by ioc sm	*/
188 	IOCPF_E_INITFAIL	= 8,	/*  init fail notice by ioc sm	*/
189 	IOCPF_E_GETATTRFAIL	= 9,	/*  getattr fail notice by ioc sm */
190 	IOCPF_E_SEMLOCKED	= 10,	/*  h/w semaphore is locked	*/
191 	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
192 };
193 
194 /*
195  * IOCPF states
196  */
197 enum bfa_iocpf_state {
198 	BFA_IOCPF_RESET		= 1,	/*  IOC is in reset state */
199 	BFA_IOCPF_SEMWAIT	= 2,	/*  Waiting for IOC h/w semaphore */
200 	BFA_IOCPF_HWINIT	= 3,	/*  IOC h/w is being initialized */
201 	BFA_IOCPF_READY		= 4,	/*  IOCPF is initialized */
202 	BFA_IOCPF_INITFAIL	= 5,	/*  IOCPF initialization failed */
203 	BFA_IOCPF_FAIL		= 6,	/*  IOCPF failed */
204 	BFA_IOCPF_DISABLING	= 7,	/*  IOCPF is being disabled */
205 	BFA_IOCPF_DISABLED	= 8,	/*  IOCPF is disabled */
206 	BFA_IOCPF_FWMISMATCH	= 9,	/*  IOC f/w different from driver's */
207 };
208 
209 bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
210 bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
211 bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
212 bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
213 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
214 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
215 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
216 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
217 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
218 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
219 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
220 
221 static struct bfa_sm_table_s iocpf_sm_table[] = {
222 	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
223 	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
224 	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
225 	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
226 	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
227 	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
228 	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
229 	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
230 	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
231 	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
232 	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
233 };
234 
235 /*
236  * IOC State Machine
237  */
238 
239 /*
240  * Beginning state -- IOC is uninitialized.
241  */
242 
243 static void
244 bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
245 {
246 }
247 
248 /*
249  * IOC is in uninit state.
250  */
251 static void
252 bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
253 {
254 	bfa_trc(ioc, event);
255 
256 	switch (event) {
257 	case IOC_E_RESET:
258 		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
259 		break;
260 
261 	default:
262 		bfa_sm_fault(ioc, event);
263 	}
264 }
265 /*
266  * Reset entry actions -- initialize state machine
267  */
268 static void
269 bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
270 {
271 	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
272 }
273 
274 /*
275  * IOC is in reset state.
276  */
277 static void
278 bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
279 {
280 	bfa_trc(ioc, event);
281 
282 	switch (event) {
283 	case IOC_E_ENABLE:
284 		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
285 		break;
286 
287 	case IOC_E_DISABLE:
288 		bfa_ioc_disable_comp(ioc);
289 		break;
290 
291 	case IOC_E_DETACH:
292 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
293 		break;
294 
295 	default:
296 		bfa_sm_fault(ioc, event);
297 	}
298 }
299 
300 
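/*
 * Enabling state entry -- kick off the IOCPF state machine to bring
 * up the IOC.
 */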
301 static void
302 bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
303 {
304 	bfa_iocpf_enable(ioc);
305 }
306 
307 /*
308  * Host IOC function is being enabled, awaiting response from firmware.
309  * Semaphore is acquired.
310  */
311 static void
312 bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
313 {
314 	bfa_trc(ioc, event);
315 
316 	switch (event) {
317 	case IOC_E_ENABLED:
318 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
319 		break;
320 
321 	case IOC_E_FAILED:
322 		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
323 		break;
324 
325 	case IOC_E_HWERROR:
326 		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
327 		bfa_iocpf_initfail(ioc);
328 		break;
329 
330 	case IOC_E_DISABLE:
331 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
332 		break;
333 
334 	case IOC_E_DETACH:
335 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
336 		bfa_iocpf_stop(ioc);
337 		break;
338 
339 	case IOC_E_ENABLE:
340 		break;
341 
342 	default:
343 		bfa_sm_fault(ioc, event);
344 	}
345 }
346 
347 
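/*
 * Getattr state entry -- start the IOC timer and request IOC
 * attributes from firmware.
 */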
348 static void
349 bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
350 {
351 	bfa_ioc_timer_start(ioc);
352 	bfa_ioc_send_getattr(ioc);
353 }
354 
355 /*
356  * IOC configuration in progress. Timer is active.
357  */
358 static void
359 bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
360 {
361 	bfa_trc(ioc, event);
362 
363 	switch (event) {
364 	case IOC_E_FWRSP_GETATTR:
365 		bfa_ioc_timer_stop(ioc);
366 		bfa_ioc_check_attr_wwns(ioc);
367 		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
368 		break;
369 
370 	case IOC_E_FAILED:
371 		bfa_ioc_timer_stop(ioc);
372 		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
373 		break;
374 
375 	case IOC_E_HWERROR:
376 		bfa_ioc_timer_stop(ioc);
377 		/* fall through */
378 
379 	case IOC_E_TIMEOUT:
380 		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
381 		bfa_iocpf_getattrfail(ioc);
382 		break;
383 
384 	case IOC_E_DISABLE:
385 		bfa_ioc_timer_stop(ioc);
386 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
387 		break;
388 
389 	case IOC_E_ENABLE:
390 		break;
391 
392 	default:
393 		bfa_sm_fault(ioc, event);
394 	}
395 }
396 
397 
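/*
 * Operational state entry -- complete the enable callback and start
 * heartbeat monitoring.
 */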
398 static void
399 bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
400 {
401 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
402 
403 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
404 	bfa_ioc_hb_monitor(ioc);
405 	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
406 }
407 
408 static void
409 bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
410 {
411 	bfa_trc(ioc, event);
412 
413 	switch (event) {
414 	case IOC_E_ENABLE:
415 		break;
416 
417 	case IOC_E_DISABLE:
418 		bfa_ioc_hb_stop(ioc);
419 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
420 		break;
421 
422 	case IOC_E_FAILED:
423 		bfa_ioc_hb_stop(ioc);
424 		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
425 		break;
426 
427 	case IOC_E_HWERROR:
428 		bfa_ioc_hb_stop(ioc);
429 		/* !!! fall through !!! */
430 
431 	case IOC_E_HBFAIL:
432 		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
433 		bfa_iocpf_fail(ioc);
434 		break;
435 
436 	default:
437 		bfa_sm_fault(ioc, event);
438 	}
439 }
440 
441 
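/*
 * Disabling state entry -- request IOCPF disable and log the event.
 */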
442 static void
443 bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
444 {
445 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
446 	bfa_iocpf_disable(ioc);
447 	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
448 }
449 
450 /*
451  * IOC is being disabled
452  */
453 static void
454 bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
455 {
456 	bfa_trc(ioc, event);
457 
458 	switch (event) {
459 	case IOC_E_DISABLED:
460 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
461 		break;
462 
463 	case IOC_E_HWERROR:
464 		/*
465 		 * No state change. The IOC moves to the disabled state
466 		 * only after the iocpf sm completes its failure
467 		 * processing and reaches the disabled state itself.
468 		 */
469 		bfa_iocpf_fail(ioc);
470 		break;
471 
472 	default:
473 		bfa_sm_fault(ioc, event);
474 	}
475 }
476 
477 /*
478  * IOC disable completion entry.
479  */
480 static void
481 bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
482 {
483 	bfa_ioc_disable_comp(ioc);
484 }
485 
486 static void
487 bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
488 {
489 	bfa_trc(ioc, event);
490 
491 	switch (event) {
492 	case IOC_E_ENABLE:
493 		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
494 		break;
495 
496 	case IOC_E_DISABLE:
497 		ioc->cbfn->disable_cbfn(ioc->bfa);
498 		break;
499 
500 	case IOC_E_DETACH:
501 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
502 		bfa_iocpf_stop(ioc);
503 		break;
504 
505 	default:
506 		bfa_sm_fault(ioc, event);
507 	}
508 }
509 
510 
511 static void
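/*
 * Initfail state entry -- report the failure through the enable
 * completion callback.
 */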
512 bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
513 {
514 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
515 }
516 
517 /*
518  * Hardware initialization failed.
519  */
520 static void
521 bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
522 {
523 	bfa_trc(ioc, event);
524 
525 	switch (event) {
526 	case IOC_E_ENABLED:
527 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
528 		break;
529 
530 	case IOC_E_FAILED:
531 		/*
532 		 * Initialization failure during iocpf init retry.
533 		 */
534 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
535 		break;
536 
537 	case IOC_E_DISABLE:
538 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
539 		break;
540 
541 	case IOC_E_DETACH:
542 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
543 		bfa_iocpf_stop(ioc);
544 		break;
545 
546 	default:
547 		bfa_sm_fault(ioc, event);
548 	}
549 }
550 
551 
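/*
 * Fail state entry -- notify the driver and all registered modules of
 * the heartbeat failure.
 */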
552 static void
553 bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
554 {
555 	struct list_head			*qe;
556 	struct bfa_ioc_hbfail_notify_s	*notify;
557 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
558 
559 	/*
560 	 * Notify driver and common modules registered for notification.
561 	 */
562 	ioc->cbfn->hbfail_cbfn(ioc->bfa);
563 	list_for_each(qe, &ioc->hb_notify_q) {
564 		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
565 		notify->cbfn(notify->cbarg);
566 	}
567 
568 	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
569 		"Heart Beat of IOC has failed\n");
570 }
571 
572 /*
573  * IOC failure.
574  */
575 static void
576 bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
577 {
578 	bfa_trc(ioc, event);
579 
580 	switch (event) {
581 
582 	case IOC_E_FAILED:
583 		/*
584 		 * Initialization failure during iocpf recovery.
585 		 * !!! Fall through !!!
586 		 */
587 	case IOC_E_ENABLE:
588 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
589 		break;
590 
591 	case IOC_E_ENABLED:
592 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
593 		break;
594 
595 	case IOC_E_DISABLE:
596 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
597 		break;
598 
599 	case IOC_E_HWERROR:
600 		/*
601 		 * HB failure notification, ignore.
602 		 */
603 		break;
604 	default:
605 		bfa_sm_fault(ioc, event);
606 	}
607 }
608 
609 
610 
611 /*
612  * IOCPF State Machine
613  */
614 
615 
616 /*
617  * Reset entry actions -- initialize state machine
618  */
619 static void
620 bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
621 {
622 	iocpf->retry_count = 0;
623 	iocpf->auto_recover = bfa_auto_recover;
624 }
625 
626 /*
627  * Beginning state. IOC is in reset state.
628  */
629 static void
630 bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
631 {
632 	struct bfa_ioc_s *ioc = iocpf->ioc;
633 
634 	bfa_trc(ioc, event);
635 
636 	switch (event) {
637 	case IOCPF_E_ENABLE:
638 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
639 		break;
640 
641 	case IOCPF_E_STOP:
642 		break;
643 
644 	default:
645 		bfa_sm_fault(ioc, event);
646 	}
647 }
648 
649 /*
650  * Semaphore should be acquired for version check.
651  */
652 static void
653 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
654 {
655 	bfa_ioc_hw_sem_get(iocpf->ioc);
656 }
657 
658 /*
659  * Awaiting h/w semaphore to continue with version check.
660  */
661 static void
662 bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
663 {
664 	struct bfa_ioc_s *ioc = iocpf->ioc;
665 
666 	bfa_trc(ioc, event);
667 
668 	switch (event) {
669 	case IOCPF_E_SEMLOCKED:
670 		if (bfa_ioc_firmware_lock(ioc)) {
671 			iocpf->retry_count = 0;
672 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
673 		} else {
674 			bfa_ioc_hw_sem_release(ioc);
675 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
676 		}
677 		break;
678 
679 	case IOCPF_E_DISABLE:
680 		bfa_ioc_hw_sem_get_cancel(ioc);
681 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
682 		bfa_ioc_pf_disabled(ioc);
683 		break;
684 
685 	case IOCPF_E_STOP:
686 		bfa_ioc_hw_sem_get_cancel(ioc);
687 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
688 		break;
689 
690 	default:
691 		bfa_sm_fault(ioc, event);
692 	}
693 }
694 
695 /*
696  * Notify enable completion callback.
697  */
698 static void
699 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
700 {
701 	/*
702 	 * Call only the first time sm enters fwmismatch state.
703 	 */
704 	if (iocpf->retry_count == 0)
705 		bfa_ioc_pf_fwmismatch(iocpf->ioc);
706 
707 	iocpf->retry_count++;
708 	bfa_iocpf_timer_start(iocpf->ioc);
709 }
710 
711 /*
712  * Awaiting firmware version match.
713  */
714 static void
715 bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
716 {
717 	struct bfa_ioc_s *ioc = iocpf->ioc;
718 
719 	bfa_trc(ioc, event);
720 
721 	switch (event) {
722 	case IOCPF_E_TIMEOUT:
723 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
724 		break;
725 
726 	case IOCPF_E_DISABLE:
727 		bfa_iocpf_timer_stop(ioc);
728 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
729 		bfa_ioc_pf_disabled(ioc);
730 		break;
731 
732 	case IOCPF_E_STOP:
733 		bfa_iocpf_timer_stop(ioc);
734 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
735 		break;
736 
737 	default:
738 		bfa_sm_fault(ioc, event);
739 	}
740 }
741 
742 /*
743  * Request for semaphore.
744  */
745 static void
746 bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
747 {
748 	bfa_ioc_hw_sem_get(iocpf->ioc);
749 }
750 
751 /*
752  * Awaiting semaphore for h/w initialization.
753  */
754 static void
755 bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
756 {
757 	struct bfa_ioc_s *ioc = iocpf->ioc;
758 
759 	bfa_trc(ioc, event);
760 
761 	switch (event) {
762 	case IOCPF_E_SEMLOCKED:
763 		iocpf->retry_count = 0;
764 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
765 		break;
766 
767 	case IOCPF_E_DISABLE:
768 		bfa_ioc_hw_sem_get_cancel(ioc);
769 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
770 		break;
771 
772 	default:
773 		bfa_sm_fault(ioc, event);
774 	}
775 }
776 
777 
778 static void
779 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
780 {
781 	bfa_iocpf_timer_start(iocpf->ioc);
782 	bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
783 }
784 
785 /*
786  * Hardware is being initialized. Interrupts are enabled.
787  * Holding hardware semaphore lock.
788  */
789 static void
790 bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
791 {
792 	struct bfa_ioc_s *ioc = iocpf->ioc;
793 
794 	bfa_trc(ioc, event);
795 
796 	switch (event) {
797 	case IOCPF_E_FWREADY:
798 		bfa_iocpf_timer_stop(ioc);
799 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
800 		break;
801 
802 	case IOCPF_E_INITFAIL:
803 		bfa_iocpf_timer_stop(ioc);
804 		/*
805 		 * !!! fall through !!!
806 		 */
807 
808 	case IOCPF_E_TIMEOUT:
809 		iocpf->retry_count++;
810 		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
811 			bfa_iocpf_timer_start(ioc);
812 			bfa_ioc_reset(ioc, BFA_TRUE);
813 			break;
814 		}
815 
816 		bfa_ioc_hw_sem_release(ioc);
817 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
818 
819 		if (event == IOCPF_E_TIMEOUT)
820 			bfa_ioc_pf_failed(ioc);
821 		break;
822 
823 	case IOCPF_E_DISABLE:
824 		bfa_ioc_hw_sem_release(ioc);
825 		bfa_iocpf_timer_stop(ioc);
826 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
827 		break;
828 
829 	default:
830 		bfa_sm_fault(ioc, event);
831 	}
832 }
833 
834 
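/*
 * Enabling entry -- start the IOCPF timer and send an enable request
 * to firmware.
 */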
835 static void
836 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
837 {
838 	bfa_iocpf_timer_start(iocpf->ioc);
839 	bfa_ioc_send_enable(iocpf->ioc);
840 }
841 
842 /*
843  * Host IOC function is being enabled, awaiting response from firmware.
844  * Semaphore is acquired.
845  */
846 static void
847 bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
848 {
849 	struct bfa_ioc_s *ioc = iocpf->ioc;
850 
851 	bfa_trc(ioc, event);
852 
853 	switch (event) {
854 	case IOCPF_E_FWRSP_ENABLE:
855 		bfa_iocpf_timer_stop(ioc);
856 		bfa_ioc_hw_sem_release(ioc);
857 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
858 		break;
859 
860 	case IOCPF_E_INITFAIL:
861 		bfa_iocpf_timer_stop(ioc);
862 		/*
863 		 * !!! fall through !!!
864 		 */
865 
866 	case IOCPF_E_TIMEOUT:
867 		iocpf->retry_count++;
868 		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
869 			writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
870 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
871 			break;
872 		}
873 
874 		bfa_ioc_hw_sem_release(ioc);
875 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
876 
877 		if (event == IOCPF_E_TIMEOUT)
878 			bfa_ioc_pf_failed(ioc);
879 		break;
880 
881 	case IOCPF_E_DISABLE:
882 		bfa_iocpf_timer_stop(ioc);
883 		bfa_ioc_hw_sem_release(ioc);
884 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
885 		break;
886 
887 	case IOCPF_E_FWREADY:
888 		bfa_ioc_send_enable(ioc);
889 		break;
890 
891 	default:
892 		bfa_sm_fault(ioc, event);
893 	}
894 }
895 
896 
897 
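/*
 * Ready entry -- notify the IOC state machine that enable completed.
 */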
898 static void
899 bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
900 {
901 	bfa_ioc_pf_enabled(iocpf->ioc);
902 }
903 
904 static void
905 bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
906 {
907 	struct bfa_ioc_s *ioc = iocpf->ioc;
908 
909 	bfa_trc(ioc, event);
910 
911 	switch (event) {
912 	case IOCPF_E_DISABLE:
913 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
914 		break;
915 
916 	case IOCPF_E_GETATTRFAIL:
917 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
918 		break;
919 
920 	case IOCPF_E_FAIL:
921 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
922 		break;
923 
924 	case IOCPF_E_FWREADY:
925 		if (bfa_ioc_is_operational(ioc))
926 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
927 		else
928 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
929 
930 		bfa_ioc_pf_failed(ioc);
931 		break;
932 
933 	default:
934 		bfa_sm_fault(ioc, event);
935 	}
936 }
937 
938 
939 static void
940 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
941 {
942 	bfa_iocpf_timer_start(iocpf->ioc);
943 	bfa_ioc_send_disable(iocpf->ioc);
944 }
945 
946 /*
947  * IOC is being disabled
948  */
949 static void
950 bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
951 {
952 	struct bfa_ioc_s *ioc = iocpf->ioc;
953 
954 	bfa_trc(ioc, event);
955 
956 	switch (event) {
957 	case IOCPF_E_FWRSP_DISABLE:
958 	case IOCPF_E_FWREADY:
959 		bfa_iocpf_timer_stop(ioc);
960 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
961 		break;
962 
963 	case IOCPF_E_FAIL:
964 		bfa_iocpf_timer_stop(ioc);
965 		/*
966 		 * !!! fall through !!!
967 		 */
968 
969 	case IOCPF_E_TIMEOUT:
970 		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
971 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
972 		break;
973 
974 	case IOCPF_E_FWRSP_ENABLE:
975 		break;
976 
977 	default:
978 		bfa_sm_fault(ioc, event);
979 	}
980 }
981 
982 /*
983  * IOC disable completion entry.
984  */
985 static void
986 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
987 {
988 	bfa_ioc_pf_disabled(iocpf->ioc);
989 }
990 
991 static void
992 bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
993 {
994 	struct bfa_ioc_s *ioc = iocpf->ioc;
995 
996 	bfa_trc(ioc, event);
997 
998 	switch (event) {
999 	case IOCPF_E_ENABLE:
1000 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1001 		break;
1002 
1003 	case IOCPF_E_STOP:
1004 		bfa_ioc_firmware_unlock(ioc);
1005 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1006 		break;
1007 
1008 	default:
1009 		bfa_sm_fault(ioc, event);
1010 	}
1011 }
1012 
1013 
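/*
 * Initfail entry -- arm the timer for a h/w initialization retry.
 */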
1014 static void
1015 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
1016 {
1017 	bfa_iocpf_timer_start(iocpf->ioc);
1018 }
1019 
1020 /*
1021  * Hardware initialization failed.
1022  */
1023 static void
1024 bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1025 {
1026 	struct bfa_ioc_s *ioc = iocpf->ioc;
1027 
1028 	bfa_trc(ioc, event);
1029 
1030 	switch (event) {
1031 	case IOCPF_E_DISABLE:
1032 		bfa_iocpf_timer_stop(ioc);
1033 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1034 		break;
1035 
1036 	case IOCPF_E_STOP:
1037 		bfa_iocpf_timer_stop(ioc);
1038 		bfa_ioc_firmware_unlock(ioc);
1039 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1040 		break;
1041 
1042 	case IOCPF_E_TIMEOUT:
1043 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1044 		break;
1045 
1046 	default:
1047 		bfa_sm_fault(ioc, event);
1048 	}
1049 }
1050 
1051 
1052 static void
1053 bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
1054 {
1055 	/*
1056 	 * Mark IOC as failed in hardware and stop firmware.
1057 	 */
1058 	bfa_ioc_lpu_stop(iocpf->ioc);
1059 	writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate);
1060 
1061 	/*
1062 	 * Notify other functions on HB failure.
1063 	 */
1064 	bfa_ioc_notify_hbfail(iocpf->ioc);
1065 
1066 	/*
1067 	 * Flush any queued up mailbox requests.
1068 	 */
1069 	bfa_ioc_mbox_hbfail(iocpf->ioc);
1070 
1071 	if (iocpf->auto_recover)
1072 		bfa_iocpf_recovery_timer_start(iocpf->ioc);
1073 }
1074 
1075 /*
1076  * IOC is in failed state.
1077  */
1078 static void
1079 bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1080 {
1081 	struct bfa_ioc_s *ioc = iocpf->ioc;
1082 
1083 	bfa_trc(ioc, event);
1084 
1085 	switch (event) {
1086 	case IOCPF_E_DISABLE:
1087 		if (iocpf->auto_recover)
1088 			bfa_iocpf_timer_stop(ioc);
1089 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1090 		break;
1091 
1092 	case IOCPF_E_TIMEOUT:
1093 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1094 		break;
1095 
1096 	default:
1097 		bfa_sm_fault(ioc, event);
1098 	}
1099 }
1100 
1101 
1102 
1103 /*
1104  *  hal_ioc_pvt BFA IOC private functions
1105  */
1106 
1107 static void
1108 bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
1109 {
1110 	struct list_head			*qe;
1111 	struct bfa_ioc_hbfail_notify_s	*notify;
1112 
1113 	ioc->cbfn->disable_cbfn(ioc->bfa);
1114 
1115 	/*
1116 	 * Notify common modules registered for notification.
1117 	 */
1118 	list_for_each(qe, &ioc->hb_notify_q) {
1119 		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
1120 		notify->cbfn(notify->cbarg);
1121 	}
1122 }
1123 
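/*
 * Acquire a h/w semaphore -- spin until the register reads 0 (locked)
 * or the spin count expires.
 */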
1124 bfa_boolean_t
1125 bfa_ioc_sem_get(void __iomem *sem_reg)
1126 {
1127 	u32 r32;
1128 	int cnt = 0;
1129 #define BFA_SEM_SPINCNT	3000
1130 
1131 	r32 = readl(sem_reg);
1132 
1133 	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
1134 		cnt++;
1135 		udelay(2);
1136 		r32 = readl(sem_reg);
1137 	}
1138 
1139 	if (r32 == 0)
1140 		return BFA_TRUE;
1141 
1142 	bfa_assert(cnt < BFA_SEM_SPINCNT);
1143 	return BFA_FALSE;
1144 }
1145 
1146 void
1147 bfa_ioc_sem_release(void __iomem *sem_reg)
1148 {
1149 	writel(1, sem_reg);
1150 }
1151 
1152 static void
1153 bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1154 {
1155 	u32	r32;
1156 
1157 	/*
1158 	 * First read to the semaphore register will return 0, subsequent reads
1159 	 * will return 1. Semaphore is released by writing 1 to the register.
1160 	 */
1161 	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1162 	if (r32 == 0) {
1163 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1164 		return;
1165 	}
1166 
1167 	bfa_sem_timer_start(ioc);
1168 }
1169 
1170 void
1171 bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
1172 {
1173 	writel(1, ioc->ioc_regs.ioc_sem_reg);
1174 }
1175 
1176 static void
1177 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
1178 {
1179 	bfa_sem_timer_stop(ioc);
1180 }
1181 
1182 /*
1183  * Initialize LPU local memory (aka secondary memory / SRAM)
1184  */
1185 static void
1186 bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1187 {
1188 	u32	pss_ctl;
1189 	int		i;
1190 #define PSS_LMEM_INIT_TIME  10000
1191 
1192 	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1193 	pss_ctl &= ~__PSS_LMEM_RESET;
1194 	pss_ctl |= __PSS_LMEM_INIT_EN;
1195 
1196 	/*
1197 	 * I2C workaround: 12.5 kHz clock
1198 	 */
1199 	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1200 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1201 
1202 	/*
1203 	 * wait for memory initialization to be complete
1204 	 */
1205 	i = 0;
1206 	do {
1207 		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1208 		i++;
1209 	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1210 
1211 	/*
1212 	 * If memory initialization is not successful, IOC timeout will catch
1213 	 * such failures.
1214 	 */
1215 	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
1216 	bfa_trc(ioc, pss_ctl);
1217 
1218 	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1219 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1220 }
1221 
1222 static void
1223 bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1224 {
1225 	u32	pss_ctl;
1226 
1227 	/*
1228 	 * Take processor out of reset.
1229 	 */
1230 	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1231 	pss_ctl &= ~__PSS_LPU0_RESET;
1232 
1233 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1234 }
1235 
1236 static void
1237 bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1238 {
1239 	u32	pss_ctl;
1240 
1241 	/*
1242 	 * Put processors in reset.
1243 	 */
1244 	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1245 	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1246 
1247 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1248 }
1249 
1250 /*
1251  * Fetch the firmware image header (signature and version) from IOC SMEM.
1252  */
1253 void
1254 bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1255 {
1256 	u32	pgnum, pgoff;
1257 	u32	loff = 0;
1258 	int		i;
1259 	u32	*fwsig = (u32 *) fwhdr;
1260 
1261 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1262 	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
1263 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1264 
1265 	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
1266 	     i++) {
1267 		fwsig[i] =
1268 			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1269 		loff += sizeof(u32);
1270 	}
1271 }
1272 
1273 /*
1274  * Returns TRUE if the running firmware and the driver's firmware image
1275  * are the same (md5sum compare).
1275  */
1276 bfa_boolean_t
1277 bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1278 {
1279 	struct bfi_ioc_image_hdr_s *drv_fwhdr;
1280 	int i;
1281 
1282 	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1283 		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
1284 
1285 	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1286 		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
1287 			bfa_trc(ioc, i);
1288 			bfa_trc(ioc, fwhdr->md5sum[i]);
1289 			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
1290 			return BFA_FALSE;
1291 		}
1292 	}
1293 
1294 	bfa_trc(ioc, fwhdr->md5sum[0]);
1295 	return BFA_TRUE;
1296 }
1297 
1298 /*
1299  * Return true if current running version is valid. Firmware signature and
1300  * execution context (driver/bios) must match.
1301  */
1302 static bfa_boolean_t
1303 bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1304 {
1305 	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
1306 
1307 	/*
1308 	 * If bios/efi boot (flash based) -- return true
1309 	 */
1310 	if (bfa_ioc_is_bios_optrom(ioc))
1311 		return BFA_TRUE;
1312 
1313 	bfa_ioc_fwver_get(ioc, &fwhdr);
1314 	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1315 		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
1316 
1317 	if (fwhdr.signature != drv_fwhdr->signature) {
1318 		bfa_trc(ioc, fwhdr.signature);
1319 		bfa_trc(ioc, drv_fwhdr->signature);
1320 		return BFA_FALSE;
1321 	}
1322 
1323 	if (swab32(fwhdr.param) != boot_env) {
1324 		bfa_trc(ioc, fwhdr.param);
1325 		bfa_trc(ioc, boot_env);
1326 		return BFA_FALSE;
1327 	}
1328 
1329 	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
1330 }
1331 
1332 /*
1333  * Conditionally flush any pending message from firmware at start.
1334  */
1335 static void
1336 bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1337 {
1338 	u32	r32;
1339 
1340 	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1341 	if (r32)
1342 		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1343 }
1344 
1345 
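/*
 * Initialize IOC hardware -- select boot type and environment based on
 * the current firmware state, then either boot new firmware or reuse
 * the image that is already running.
 */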
1346 static void
1347 bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1348 {
1349 	enum bfi_ioc_state ioc_fwstate;
1350 	bfa_boolean_t fwvalid;
1351 	u32 boot_type;
1352 	u32 boot_env;
1353 
1354 	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1355 
1356 	if (force)
1357 		ioc_fwstate = BFI_IOC_UNINIT;
1358 
1359 	bfa_trc(ioc, ioc_fwstate);
1360 
1361 	boot_type = BFI_BOOT_TYPE_NORMAL;
1362 	boot_env = BFI_BOOT_LOADER_OS;
1363 
1364 	/*
1365 	 * Flash based firmware boot BIOS env.
1366 	 */
1367 	if (bfa_ioc_is_bios_optrom(ioc)) {
1368 		boot_type = BFI_BOOT_TYPE_FLASH;
1369 		boot_env = BFI_BOOT_LOADER_BIOS;
1370 	}
1371 
1372 	/*
1373 	 * Flash based firmware boot UEFI env.
1374 	 */
1375 	if (bfa_ioc_is_uefi(ioc)) {
1376 		boot_type = BFI_BOOT_TYPE_FLASH;
1377 		boot_env = BFI_BOOT_LOADER_UEFI;
1378 	}
1379 
1380 	/*
1381 	 * check if firmware is valid
1382 	 */
1383 	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1384 		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
1385 
1386 	if (!fwvalid) {
1387 		bfa_ioc_boot(ioc, boot_type, boot_env);
1388 		return;
1389 	}
1390 
1391 	/*
1392 	 * If hardware initialization is in progress (initialized by other IOC),
1393 	 * just wait for an initialization completion interrupt.
1394 	 */
1395 	if (ioc_fwstate == BFI_IOC_INITING) {
1396 		ioc->cbfn->reset_cbfn(ioc->bfa);
1397 		return;
1398 	}
1399 
1400 	/*
1401 	 * If IOC function is disabled and firmware version is same,
1402 	 * just re-enable IOC.
1403 	 *
1404 	 * If option rom, IOC must not be in operational state. With
1405 	 * convergence, IOC will be in operational state when 2nd driver
1406 	 * is loaded.
1407 	 */
1408 	if (ioc_fwstate == BFI_IOC_DISABLED ||
1409 	    (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
1410 
1411 		/*
1412 		 * When using MSI-X any pending firmware ready event should
1413 		 * be flushed. Otherwise MSI-X interrupts are not delivered.
1414 		 */
1415 		bfa_ioc_msgflush(ioc);
1416 		ioc->cbfn->reset_cbfn(ioc->bfa);
1417 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1418 		return;
1419 	}
1420 
1421 	/*
1422 	 * Initialize the h/w for any other states.
1423 	 */
1424 	bfa_ioc_boot(ioc, boot_type, boot_env);
1425 }
1426 
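/*
 * IOC state machine timer callback.
 */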
1427 static void
1428 bfa_ioc_timeout(void *ioc_arg)
1429 {
1430 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
1431 
1432 	bfa_trc(ioc, 0);
1433 	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1434 }
1435 
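/*
 * Write a message to the host-to-LPU mailbox registers and ring the
 * doorbell to notify firmware.
 */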
1436 void
1437 bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1438 {
1439 	u32 *msgp = (u32 *) ioc_msg;
1440 	u32 i;
1441 
1442 	bfa_trc(ioc, msgp[0]);
1443 	bfa_trc(ioc, len);
1444 
1445 	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);
1446 
1447 	/*
1448 	 * first write msg to mailbox registers
1449 	 */
1450 	for (i = 0; i < len / sizeof(u32); i++)
1451 		writel(cpu_to_le32(msgp[i]),
1452 			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1453 
1454 	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1455 		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1456 
1457 	/*
1458 	 * write 1 to mailbox CMD to trigger LPU event
1459 	 */
1460 	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1461 	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1462 }
1463 
1464 static void
1465 bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1466 {
1467 	struct bfi_ioc_ctrl_req_s enable_req;
1468 	struct bfa_timeval_s tv;
1469 
1470 	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1471 		    bfa_ioc_portid(ioc));
1472 	enable_req.ioc_class = ioc->ioc_mc;
1473 	bfa_os_gettimeofday(&tv);
1474 	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
1475 	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1476 }
1477 
1478 static void
1479 bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1480 {
1481 	struct bfi_ioc_ctrl_req_s disable_req;
1482 
1483 	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1484 		    bfa_ioc_portid(ioc));
1485 	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1486 }
1487 
1488 static void
1489 bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1490 {
1491 	struct bfi_ioc_getattr_req_s	attr_req;
1492 
1493 	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1494 		    bfa_ioc_portid(ioc));
1495 	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1496 	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1497 }
1498 
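/*
 * Heartbeat timer callback -- recover the IOC if the firmware
 * heartbeat counter has stopped advancing.
 */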
1499 static void
1500 bfa_ioc_hb_check(void *cbarg)
1501 {
1502 	struct bfa_ioc_s  *ioc = cbarg;
1503 	u32	hb_count;
1504 
1505 	hb_count = readl(ioc->ioc_regs.heartbeat);
1506 	if (ioc->hb_count == hb_count) {
1507 		printk(KERN_CRIT "Firmware heartbeat failure at %d\n", hb_count);
1508 		bfa_ioc_recover(ioc);
1509 		return;
1510 	}
1511 
1512 	ioc->hb_count = hb_count;
1513 
1514 	bfa_ioc_mbox_poll(ioc);
1515 	bfa_hb_timer_start(ioc);
1516 }
1517 
1518 static void
1519 bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1520 {
1521 	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1522 	bfa_hb_timer_start(ioc);
1523 }
1524 
1525 static void
1526 bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
1527 {
1528 	bfa_hb_timer_stop(ioc);
1529 }
1530 
1531 
1532 /*
1533  *	Initiate a full firmware download.
1534  */
1535 static void
1536 bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1537 		    u32 boot_env)
1538 {
1539 	u32 *fwimg;
1540 	u32 pgnum, pgoff;
1541 	u32 loff = 0;
1542 	u32 chunkno = 0;
1543 	u32 i;
1544 
1545 	/*
1546 	 * Initialize LMEM first before code download
1547 	 */
1548 	bfa_ioc_lmem_init(ioc);
1549 
1550 	bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
1551 	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1552 
1553 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1554 	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
1555 
1556 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1557 
1558 	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
1559 
1560 		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1561 			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1562 			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
1563 					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1564 		}
1565 
1566 		/*
1567 		 * write smem
1568 		 */
1569 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1570 			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
1571 
1572 		loff += sizeof(u32);
1573 
1574 		/*
1575 		 * handle page offset wrap around
1576 		 */
1577 		loff = PSS_SMEM_PGOFF(loff);
1578 		if (loff == 0) {
1579 			pgnum++;
1580 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1581 		}
1582 	}
1583 
1584 	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
1585 
1586 	/*
1587 	 * Set boot type and boot param at the end.
1588 	 */
1589 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
1590 			swab32(boot_type));
1591 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
1592 			swab32(boot_env));
1593 }
1594 
1595 static void
1596 bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1597 {
1598 	bfa_ioc_hwinit(ioc, force);
1599 }
1600 
1601 /*
1602  * Update BFA configuration from firmware configuration.
1603  */
1604 static void
1605 bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1606 {
1607 	struct bfi_ioc_attr_s	*attr = ioc->attr;
1608 
1609 	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
1610 	attr->card_type     = be32_to_cpu(attr->card_type);
1611 	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
1612 
1613 	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1614 }
1615 
1616 /*
1617  * Attach time initialization of mbox logic.
1618  */
1619 static void
1620 bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1621 {
1622 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1623 	int	mc;
1624 
1625 	INIT_LIST_HEAD(&mod->cmd_q);
1626 	for (mc = 0; mc < BFI_MC_MAX; mc++) {
1627 		mod->mbhdlr[mc].cbfn = NULL;
1628 		mod->mbhdlr[mc].cbarg = ioc->bfa;
1629 	}
1630 }
1631 
1632 /*
1633  * Mbox poll timer -- restarts any pending mailbox requests.
1634  */
1635 static void
1636 bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1637 {
1638 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1639 	struct bfa_mbox_cmd_s		*cmd;
1640 	u32			stat;
1641 
1642 	/*
1643 	 * If no command pending, do nothing
1644 	 */
1645 	if (list_empty(&mod->cmd_q))
1646 		return;
1647 
1648 	/*
1649 	 * If previous command is not yet fetched by firmware, do nothing
1650 	 */
1651 	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1652 	if (stat)
1653 		return;
1654 
1655 	/*
1656 	 * Enqueue command to firmware.
1657 	 */
1658 	bfa_q_deq(&mod->cmd_q, &cmd);
1659 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1660 }
1661 
1662 /*
1663  * Cleanup any pending requests.
1664  */
1665 static void
1666 bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
1667 {
1668 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1669 	struct bfa_mbox_cmd_s		*cmd;
1670 
1671 	while (!list_empty(&mod->cmd_q))
1672 		bfa_q_deq(&mod->cmd_q, &cmd);
1673 }
1674 
1675 /*
1676  * Read data from SMEM to host through PCI memmap
1677  *
1678  * @param[in]	ioc	memory for IOC
1679  * @param[in]	tbuf	app memory to store data from smem
1680  * @param[in]	soff	smem offset
1681  * @param[in]	sz	size of smem in bytes
1682  */
1683 static bfa_status_t
1684 bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1685 {
1686 	u32 pgnum, loff, r32;
1687 	int i, len;
1688 	u32 *buf = tbuf;
1689 
1690 	pgnum = bfa_ioc_smem_pgnum(ioc, soff);
1691 	loff = bfa_ioc_smem_pgoff(ioc, soff);
1692 	bfa_trc(ioc, pgnum);
1693 	bfa_trc(ioc, loff);
1694 	bfa_trc(ioc, sz);
1695 
1696 	/*
1697 	 *  Hold semaphore to serialize pll init and fwtrc.
1698 	 */
1699 	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1700 		bfa_trc(ioc, 0);
1701 		return BFA_STATUS_FAILED;
1702 	}
1703 
1704 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1705 
1706 	len = sz/sizeof(u32);
1707 	bfa_trc(ioc, len);
1708 	for (i = 0; i < len; i++) {
1709 		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1710 		buf[i] = be32_to_cpu(r32);
1711 		loff += sizeof(u32);
1712 
1713 		/*
1714 		 * handle page offset wrap around
1715 		 */
1716 		loff = PSS_SMEM_PGOFF(loff);
1717 		if (loff == 0) {
1718 			pgnum++;
1719 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1720 		}
1721 	}
1722 	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
1723 	/*
1724 	 *  release semaphore.
1725 	 */
1726 	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1727 
1728 	bfa_trc(ioc, pgnum);
1729 	return BFA_STATUS_OK;
1730 }
1731 
1732 /*
1733  * Clear SMEM data from host through PCI memmap
1734  *
1735  * @param[in]	ioc	memory for IOC
1736  * @param[in]	soff	smem offset
1737  * @param[in]	sz	size of smem in bytes
1738  */
1739 static bfa_status_t
1740 bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1741 {
1742 	int i, len;
1743 	u32 pgnum, loff;
1744 
1745 	pgnum = bfa_ioc_smem_pgnum(ioc, soff);
1746 	loff = bfa_ioc_smem_pgoff(ioc, soff);
1747 	bfa_trc(ioc, pgnum);
1748 	bfa_trc(ioc, loff);
1749 	bfa_trc(ioc, sz);
1750 
1751 	/*
1752 	 *  Hold semaphore to serialize pll init and fwtrc.
1753 	 */
1754 	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1755 		bfa_trc(ioc, 0);
1756 		return BFA_STATUS_FAILED;
1757 	}
1758 
1759 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1760 
1761 	len = sz/sizeof(u32); /* len in words */
1762 	bfa_trc(ioc, len);
1763 	for (i = 0; i < len; i++) {
1764 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1765 		loff += sizeof(u32);
1766 
1767 		/*
1768 		 * handle page offset wrap around
1769 		 */
1770 		loff = PSS_SMEM_PGOFF(loff);
1771 		if (loff == 0) {
1772 			pgnum++;
1773 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1774 		}
1775 	}
1776 	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
1777 
1778 	/*
1779 	 *  release semaphore.
1780 	 */
1781 	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1782 	bfa_trc(ioc, pgnum);
1783 	return BFA_STATUS_OK;
1784 }
1785 
1786 /*
1787  * hal iocpf to ioc interface
1788  */
1789 static void
1790 bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc)
1791 {
1792 	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
1793 }
1794 
1795 static void
1796 bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc)
1797 {
1798 	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
1799 }
1800 
1801 static void
1802 bfa_ioc_pf_failed(struct bfa_ioc_s *ioc)
1803 {
1804 	bfa_fsm_send_event(ioc, IOC_E_FAILED);
1805 }
1806 
1807 static void
1808 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1809 {
1810 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1811 	/*
1812 	 * Provide enable completion callback.
1813 	 */
1814 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1815 	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
1816 		"Running firmware version is incompatible "
1817 		"with the driver version\n");
1818 }
1819 
1820 
1821 
1822 /*
1823  *  hal_ioc_public
1824  */
1825 
1826 bfa_status_t
1827 bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1828 {
1829 
1830 	/*
1831 	 *  Hold semaphore so that nobody can access the chip during init.
1832 	 */
1833 	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1834 
1835 	bfa_ioc_pll_init_asic(ioc);
1836 
1837 	ioc->pllinit = BFA_TRUE;
1838 	/*
1839 	 *  release semaphore.
1840 	 */
1841 	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1842 
1843 	return BFA_STATUS_OK;
1844 }
1845 
1846 /*
1847  * Interface used by diag module to do firmware boot with memory test
1848  * as the entry vector.
1849  */
1850 void
1851 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
1852 {
1853 	void __iomem *rb;
1854 
1855 	bfa_ioc_stats(ioc, ioc_boots);
1856 
1857 	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1858 		return;
1859 
1860 	/*
1861 	 * Initialize IOC state of all functions on a chip reset.
1862 	 */
1863 	rb = ioc->pcidev.pci_bar_kva;
1864 	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
1865 		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
1866 		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
1867 	} else {
1868 		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
1869 		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
1870 	}
1871 
1872 	bfa_ioc_msgflush(ioc);
1873 	bfa_ioc_download_fw(ioc, boot_type, boot_env);
1874 
1875 	/*
1876 	 * Enable interrupts just before starting LPU
1877 	 */
1878 	ioc->cbfn->reset_cbfn(ioc->bfa);
1879 	bfa_ioc_lpu_start(ioc);
1880 }
1881 
1882 /*
1883  * Enable/disable IOC failure auto recovery.
1884  */
1885 void
1886 bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
1887 {
1888 	bfa_auto_recover = auto_recover;
1889 }
1890 
1891 
1892 
1893 bfa_boolean_t
1894 bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
1895 {
1896 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
1897 }
1898 
1899 bfa_boolean_t
1900 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
1901 {
1902 	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
1903 
1904 	return ((r32 != BFI_IOC_UNINIT) &&
1905 		(r32 != BFI_IOC_INITING) &&
1906 		(r32 != BFI_IOC_MEMTEST));
1907 }
1908 
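/*
 * Read one inbound mailbox message and clear the mailbox interrupt.
 */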
1909 void
1910 bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
1911 {
1912 	u32	*msgp = mbmsg;
1913 	u32	r32;
1914 	int		i;
1915 
1916 	/*
1917 	 * read the MBOX msg
1918 	 */
1919 	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
1920 	     i++) {
1921 		r32 = readl(ioc->ioc_regs.lpu_mbox +
1922 				   i * sizeof(u32));
1923 		msgp[i] = cpu_to_be32(r32);
1924 	}
1925 
1926 	/*
1927 	 * turn off mailbox interrupt by clearing mailbox status
1928 	 */
1929 	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1930 	readl(ioc->ioc_regs.lpu_mbox_cmd);
1931 }
1932 
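/*
 * Dispatch IOC class (BFI_MC_IOC) mailbox messages from firmware.
 */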
1933 void
1934 bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
1935 {
1936 	union bfi_ioc_i2h_msg_u	*msg;
1937 	struct bfa_iocpf_s *iocpf = &ioc->iocpf;
1938 
1939 	msg = (union bfi_ioc_i2h_msg_u *) m;
1940 
1941 	bfa_ioc_stats(ioc, ioc_isrs);
1942 
1943 	switch (msg->mh.msg_id) {
1944 	case BFI_IOC_I2H_HBEAT:
1945 		break;
1946 
1947 	case BFI_IOC_I2H_READY_EVENT:
1948 		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
1949 		break;
1950 
1951 	case BFI_IOC_I2H_ENABLE_REPLY:
1952 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1953 		break;
1954 
1955 	case BFI_IOC_I2H_DISABLE_REPLY:
1956 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
1957 		break;
1958 
1959 	case BFI_IOC_I2H_GETATTR_REPLY:
1960 		bfa_ioc_getattr_reply(ioc);
1961 		break;
1962 
1963 	default:
1964 		bfa_trc(ioc, msg->mh.msg_id);
1965 		bfa_assert(0);
1966 	}
1967 }
1968 
1969 /*
1970  * IOC attach time initialization and setup.
1971  *
1972  * @param[in]	ioc	memory for IOC
1973  * @param[in]	bfa	driver instance structure
1974  */
1975 void
1976 bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
1977 	       struct bfa_timer_mod_s *timer_mod)
1978 {
1979 	ioc->bfa	= bfa;
1980 	ioc->cbfn	= cbfn;
1981 	ioc->timer_mod	= timer_mod;
1982 	ioc->fcmode	= BFA_FALSE;
1983 	ioc->pllinit	= BFA_FALSE;
1984 	ioc->dbg_fwsave_once = BFA_TRUE;
1985 	ioc->iocpf.ioc	= ioc;
1986 
1987 	bfa_ioc_mbox_attach(ioc);
1988 	INIT_LIST_HEAD(&ioc->hb_notify_q);
1989 
1990 	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
1991 	bfa_fsm_send_event(ioc, IOC_E_RESET);
1992 }
1993 
1994 /*
1995  * Driver detach time IOC cleanup.
1996  */
1997 void
1998 bfa_ioc_detach(struct bfa_ioc_s *ioc)
1999 {
2000 	bfa_fsm_send_event(ioc, IOC_E_DETACH);
2001 }
2002 
2003 /*
2004  * Setup IOC PCI properties.
2005  *
2006  * @param[in]	pcidev	PCI device information for this IOC
2007  */
2008 void
2009 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2010 		 enum bfi_mclass mc)
2011 {
2012 	ioc->ioc_mc	= mc;
2013 	ioc->pcidev	= *pcidev;
2014 	ioc->ctdev	= bfa_asic_id_ct(ioc->pcidev.device_id);
2015 	ioc->cna	= ioc->ctdev && !ioc->fcmode;
2016 
2017 	/*
2018 	 * Set ASIC specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2019 	 */
2020 	if (ioc->ctdev)
2021 		bfa_ioc_set_ct_hwif(ioc);
2022 	else
2023 		bfa_ioc_set_cb_hwif(ioc);
2024 
2025 	bfa_ioc_map_port(ioc);
2026 	bfa_ioc_reg_init(ioc);
2027 }
2028 
2029 /*
2030  * Initialize IOC dma memory
2031  *
2032  * @param[in]	dm_kva	kernel virtual address of IOC dma memory
2033  * @param[in]	dm_pa	physical address of IOC dma memory
2034  */
2035 void
2036 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
2037 {
2038 	/*
2039 	 * dma memory for firmware attribute
2040 	 */
2041 	ioc->attr_dma.kva = dm_kva;
2042 	ioc->attr_dma.pa = dm_pa;
2043 	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2044 }
2045 
2046 /*
2047  * Return size of dma memory required.
2048  */
2049 u32
2050 bfa_ioc_meminfo(void)
2051 {
2052 	return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
2053 }
2054 
2055 void
2056 bfa_ioc_enable(struct bfa_ioc_s *ioc)
2057 {
2058 	bfa_ioc_stats(ioc, ioc_enables);
2059 	ioc->dbg_fwsave_once = BFA_TRUE;
2060 
2061 	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2062 }
2063 
2064 void
2065 bfa_ioc_disable(struct bfa_ioc_s *ioc)
2066 {
2067 	bfa_ioc_stats(ioc, ioc_disables);
2068 	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2069 }
2070 
2071 /*
2072  * Returns memory required for saving firmware trace in case of crash.
2073  * Driver must call this interface to allocate memory required for
2074  * automatic saving of firmware trace. Driver should call
2075  * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this
2076  * trace memory.
2077  */
2078 int
2079 bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
2080 {
2081 	return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
2082 }
2083 
2084 /*
2085  * Initialize memory for saving firmware trace. Driver must initialize
2086  * trace memory before call bfa_ioc_enable().
2087  */
2088 void
2089 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2090 {
2091 	ioc->dbg_fwsave	    = dbg_fwsave;
2092 	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->iocpf.auto_recover);
2093 }
2094 
2095 u32
2096 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
2097 {
2098 	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
2099 }
2100 
2101 u32
2102 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
2103 {
2104 	return PSS_SMEM_PGOFF(fmaddr);
2105 }
2106 
2107 /*
2108  * Register mailbox message handler functions
2109  *
2110  * @param[in]	ioc		IOC instance
2111  * @param[in]	mcfuncs		message class handler functions
2112  */
2113 void
2114 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2115 {
2116 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2117 	int				mc;
2118 
2119 	for (mc = 0; mc < BFI_MC_MAX; mc++)
2120 		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2121 }
2122 
2123 /*
2124  * Register mailbox message handler function, to be called by common modules
2125  */
2126 void
2127 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2128 		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2129 {
2130 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2131 
2132 	mod->mbhdlr[mc].cbfn	= cbfn;
2133 	mod->mbhdlr[mc].cbarg	= cbarg;
2134 }
2135 
2136 /*
2137  * Queue a mailbox command request to firmware. If the mailbox is busy,
2138  * the command is queued and sent later by the poll timer. Caller must serialize.
2139  *
2140  * @param[in]	ioc	IOC instance
2141  * @param[in]	cmd	Mailbox command
2142  */
2143 void
2144 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2145 {
2146 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2147 	u32			stat;
2148 
2149 	/*
2150 	 * If a previous command is pending, queue the new command
2151 	 */
2152 	if (!list_empty(&mod->cmd_q)) {
2153 		list_add_tail(&cmd->qe, &mod->cmd_q);
2154 		return;
2155 	}
2156 
2157 	/*
2158 	 * If mailbox is busy, queue command for poll timer
2159 	 */
2160 	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2161 	if (stat) {
2162 		list_add_tail(&cmd->qe, &mod->cmd_q);
2163 		return;
2164 	}
2165 
2166 	/*
2167 	 * mailbox is free -- send the command to firmware right away
2168 	 */
2169 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2170 }
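
/*
 * A minimal sketch of building and posting a request (compare
 * bfa_ioc_send_fwsync() later in this file). Since the routine above may
 * link the command onto cmd_q, the bfa_mbox_cmd_s normally lives in a
 * long-lived module structure; "mymod" is an illustrative assumption:
 *
 *	struct bfi_ioc_ctrl_req_s *req =
 *			(struct bfi_ioc_ctrl_req_s *) mymod->mbcmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *		    bfa_ioc_portid(ioc));
 *	req->ioc_class = ioc->ioc_mc;
 *	bfa_ioc_mbox_queue(ioc, &mymod->mbcmd);
 */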
2171 
2172 /*
2173  * Handle mailbox interrupts
2174  */
2175 void
2176 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2177 {
2178 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2179 	struct bfi_mbmsg_s		m;
2180 	int				mc;
2181 
2182 	bfa_ioc_msgget(ioc, &m);
2183 
2184 	/*
2185 	 * Treat IOC message class as special.
2186 	 */
2187 	mc = m.mh.msg_class;
2188 	if (mc == BFI_MC_IOC) {
2189 		bfa_ioc_isr(ioc, &m);
2190 		return;
2191 	}
2192 
2193 	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2194 		return;
2195 
2196 	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2197 }
2198 
2199 void
2200 bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2201 {
2202 	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2203 }
2204 
2205 void
2206 bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
2207 {
2208 	ioc->fcmode  = BFA_TRUE;
2209 	ioc->port_id = bfa_ioc_pcifn(ioc);
2210 }
2211 
2212 /*
2213  * return true if IOC is disabled
2214  */
2215 bfa_boolean_t
2216 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2217 {
2218 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2219 		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2220 }
2221 
2222 /*
2223  * Return true if the IOC firmware differs from the driver's expected version.
2224  */
2225 bfa_boolean_t
2226 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2227 {
2228 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2229 		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2230 		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2231 }
2232 
2233 #define bfa_ioc_state_disabled(__sm)		\
2234 	(((__sm) == BFI_IOC_UNINIT) ||		\
2235 	 ((__sm) == BFI_IOC_INITING) ||		\
2236 	 ((__sm) == BFI_IOC_HWINIT) ||		\
2237 	 ((__sm) == BFI_IOC_DISABLED) ||	\
2238 	 ((__sm) == BFI_IOC_FAIL) ||		\
2239 	 ((__sm) == BFI_IOC_CFG_DISABLED))
2240 
2241 /*
2242  * Check if adapter is disabled -- both IOCs should be in a disabled
2243  * state.
2244  */
2245 bfa_boolean_t
2246 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2247 {
2248 	u32	ioc_state;
2249 	void __iomem *rb = ioc->pcidev.pci_bar_kva;
2250 
2251 	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2252 		return BFA_FALSE;
2253 
2254 	ioc_state = readl(rb + BFA_IOC0_STATE_REG);
2255 	if (!bfa_ioc_state_disabled(ioc_state))
2256 		return BFA_FALSE;
2257 
2258 	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2259 		ioc_state = readl(rb + BFA_IOC1_STATE_REG);
2260 		if (!bfa_ioc_state_disabled(ioc_state))
2261 			return BFA_FALSE;
2262 	}
2263 
2264 	return BFA_TRUE;
2265 }
2266 
2267 /*
2268  * Add to IOC heartbeat failure notification queue. To be used by common
2269  * modules such as cee, port, diag.
2270  */
2271 void
2272 bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
2273 			struct bfa_ioc_hbfail_notify_s *notify)
2274 {
2275 	list_add_tail(&notify->qe, &ioc->hb_notify_q);
2276 }
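
/*
 * A minimal sketch of enrolling for the notification; it assumes the
 * bfa_ioc_hbfail_init() helper from bfa_ioc.h, and the cee names are
 * illustrative:
 *
 *	static void cee_hbfail(void *cbarg)
 *	{
 *		struct bfa_cee_s *cee = cbarg;
 *
 *		// fail any outstanding cee requests
 *	}
 *
 *	bfa_ioc_hbfail_init(&cee->hbfail, cee_hbfail, cee);
 *	bfa_ioc_hbfail_register(cee->ioc, &cee->hbfail);
 */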
2277 
2278 #define BFA_MFG_NAME "Brocade"
2279 void
2280 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2281 			 struct bfa_adapter_attr_s *ad_attr)
2282 {
2283 	struct bfi_ioc_attr_s	*ioc_attr;
2284 
2285 	ioc_attr = ioc->attr;
2286 
2287 	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2288 	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2289 	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2290 	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2291 	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2292 		      sizeof(struct bfa_mfg_vpd_s));
2293 
2294 	ad_attr->nports = bfa_ioc_get_nports(ioc);
2295 	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2296 
2297 	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2298 	/* For now, the model description uses the same model string */
2299 	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2300 
2301 	ad_attr->card_type = ioc_attr->card_type;
2302 	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2303 
2304 	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2305 		ad_attr->prototype = 1;
2306 	else
2307 		ad_attr->prototype = 0;
2308 
2309 	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2310 	ad_attr->mac  = bfa_ioc_get_mac(ioc);
2311 
2312 	ad_attr->pcie_gen = ioc_attr->pcie_gen;
2313 	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2314 	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2315 	ad_attr->asic_rev = ioc_attr->asic_rev;
2316 
2317 	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2318 
2319 	ad_attr->cna_capable = ioc->cna;
2320 	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
2321 }
2322 
2323 enum bfa_ioc_type_e
2324 bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2325 {
2326 	if (!ioc->ctdev || ioc->fcmode)
2327 		return BFA_IOC_TYPE_FC;
2328 	else if (ioc->ioc_mc == BFI_MC_IOCFC)
2329 		return BFA_IOC_TYPE_FCoE;
2330 	else if (ioc->ioc_mc == BFI_MC_LL)
2331 		return BFA_IOC_TYPE_LL;
2332 	else {
2333 		bfa_assert(ioc->ioc_mc == BFI_MC_LL);	/* unexpected msg class */
2334 		return BFA_IOC_TYPE_LL;
2335 	}
2336 }
2337 
2338 void
2339 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2340 {
2341 	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2342 	memcpy((void *)serial_num,
2343 			(void *)ioc->attr->brcd_serialnum,
2344 			BFA_ADAPTER_SERIAL_NUM_LEN);
2345 }
2346 
2347 void
2348 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2349 {
2350 	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2351 	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2352 }
2353 
2354 void
2355 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2356 {
2357 	bfa_assert(chip_rev);
2358 
2359 	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2360 
2361 	chip_rev[0] = 'R';
2362 	chip_rev[1] = 'e';
2363 	chip_rev[2] = 'v';
2364 	chip_rev[3] = '-';
2365 	chip_rev[4] = ioc->attr->asic_rev;
2366 	chip_rev[5] = '\0';
2367 }
2368 
2369 void
2370 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2371 {
2372 	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2373 	memcpy(optrom_ver, ioc->attr->optrom_version,
2374 		      BFA_VERSION_LEN);
2375 }
2376 
2377 void
2378 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2379 {
2380 	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2381 	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2382 }
2383 
2384 void
2385 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2386 {
2387 	struct bfi_ioc_attr_s	*ioc_attr;
2388 
2389 	bfa_assert(model);
2390 	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2391 
2392 	ioc_attr = ioc->attr;
2393 
2394 	/*
2395 	 * model name
2396 	 */
2397 	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2398 		BFA_MFG_NAME, ioc_attr->card_type);
2399 }
2400 
2401 enum bfa_ioc_state
2402 bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2403 {
2404 	enum bfa_iocpf_state iocpf_st;
2405 	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2406 
2407 	if (ioc_st == BFA_IOC_ENABLING ||
2408 		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2409 
2410 		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2411 
2412 		switch (iocpf_st) {
2413 		case BFA_IOCPF_SEMWAIT:
2414 			ioc_st = BFA_IOC_SEMWAIT;
2415 			break;
2416 
2417 		case BFA_IOCPF_HWINIT:
2418 			ioc_st = BFA_IOC_HWINIT;
2419 			break;
2420 
2421 		case BFA_IOCPF_FWMISMATCH:
2422 			ioc_st = BFA_IOC_FWMISMATCH;
2423 			break;
2424 
2425 		case BFA_IOCPF_FAIL:
2426 			ioc_st = BFA_IOC_FAIL;
2427 			break;
2428 
2429 		case BFA_IOCPF_INITFAIL:
2430 			ioc_st = BFA_IOC_INITFAIL;
2431 			break;
2432 
2433 		default:
2434 			break;
2435 		}
2436 	}
2437 
2438 	return ioc_st;
2439 }
2440 
2441 void
2442 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2443 {
2444 	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2445 
2446 	ioc_attr->state = bfa_ioc_get_state(ioc);
2447 	ioc_attr->port_id = ioc->port_id;
2448 
2449 	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2450 
2451 	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2452 
2453 	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2454 	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2455 	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2456 }
2457 
2458 /*
2459  *  hal_wwn_public
2460  */
2461 wwn_t
2462 bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
2463 {
2464 	return ioc->attr->pwwn;
2465 }
2466 
2467 wwn_t
2468 bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
2469 {
2470 	return ioc->attr->nwwn;
2471 }
2472 
2473 u64
2474 bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
2475 {
2476 	return ioc->attr->mfg_pwwn;
2477 }
2478 
2479 mac_t
2480 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2481 {
2482 	/*
2483 	 * Check the IOC type and return the appropriate MAC
2484 	 */
2485 	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2486 		return ioc->attr->fcoe_mac;
2487 	else
2488 		return ioc->attr->mac;
2489 }
2490 
2491 wwn_t
2492 bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc)
2493 {
2494 	return ioc->attr->mfg_pwwn;
2495 }
2496 
2497 wwn_t
2498 bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc)
2499 {
2500 	return ioc->attr->mfg_nwwn;
2501 }
2502 
2503 mac_t
2504 bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2505 {
2506 	mac_t	m;
2507 
2508 	m = ioc->attr->mfg_mac;
2509 	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2510 		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2511 	else
2512 		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2513 			bfa_ioc_pcifn(ioc));
2514 
2515 	return m;
2516 }
2517 
2518 bfa_boolean_t
2519 bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
2520 {
2521 	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
2522 }
2523 
2524 /*
2525  * Retrieve saved firmware trace from a prior IOC failure.
2526  */
2527 bfa_status_t
2528 bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2529 {
2530 	int	tlen;
2531 
2532 	if (ioc->dbg_fwsave_len == 0)
2533 		return BFA_STATUS_ENOFSAVE;
2534 
2535 	tlen = *trclen;
2536 	if (tlen > ioc->dbg_fwsave_len)
2537 		tlen = ioc->dbg_fwsave_len;
2538 
2539 	memcpy(trcdata, ioc->dbg_fwsave, tlen);
2540 	*trclen = tlen;
2541 	return BFA_STATUS_OK;
2542 }
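
/*
 * A minimal sketch of a debugfs/ioctl read path using the routine above;
 * buffer sizing via bfa_ioc_debug_trcsz() and the vmalloc() glue are
 * illustrative assumptions:
 *
 *	int tlen = bfa_ioc_debug_trcsz(BFA_TRUE);
 *	void *buf = vmalloc(tlen);
 *
 *	if (buf && bfa_ioc_debug_fwsave(ioc, buf, &tlen) == BFA_STATUS_OK) {
 *		// tlen now holds the number of valid trace bytes in buf
 *	}
 */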
2543 
2544 /*
2545  * Clear the saved firmware trace by re-arming the one-shot auto-save
2546  */
2547 void
2548 bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
2549 {
2550 	ioc->dbg_fwsave_once = BFA_TRUE;
2551 }
2552 
2553 /*
2554  * Fetch the current firmware trace directly from IOC smem.
2555  */
2556 bfa_status_t
2557 bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2558 {
2559 	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2560 	int tlen;
2561 	bfa_status_t status;
2562 
2563 	bfa_trc(ioc, *trclen);
2564 
2565 	tlen = *trclen;
2566 	if (tlen > BFA_DBG_FWTRC_LEN)
2567 		tlen = BFA_DBG_FWTRC_LEN;
2568 
2569 	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2570 	*trclen = tlen;
2571 	return status;
2572 }
2573 
2574 static void
2575 bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2576 {
2577 	struct bfa_mbox_cmd_s cmd;
2578 	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2579 
2580 	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2581 		    bfa_ioc_portid(ioc));
2582 	req->ioc_class = ioc->ioc_mc;
2583 	bfa_ioc_mbox_queue(ioc, &cmd);
2584 }
2585 
2586 static void
2587 bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2588 {
2589 	u32 fwsync_iter = 1000;
2590 
2591 	bfa_ioc_send_fwsync(ioc);
2592 
2593 	/*
2594 	 * After sending a fw sync mbox command, wait for it to
2595 	 * take effect.  We will not wait for a response because
2596 	 *    1. fw_sync mbox cmd doesn't have a response.
2597 	 *    2. Even if we implement that, interrupts might not
2598 	 *	 be enabled when we call this function.
2599 	 * So, just keep checking if any mbox cmd is pending, and
2600 	 * after waiting for a reasonable amount of time, go ahead.
2601 	 * It is possible that fw has crashed and the mbox command
2602 	 * is never acknowledged.
2603 	 */
2604 	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2605 		fwsync_iter--;
2606 }
2607 
2608 /*
2609  * Dump firmware smem
2610  */
2611 bfa_status_t
2612 bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2613 				u32 *offset, int *buflen)
2614 {
2615 	u32 loff;
2616 	int dlen;
2617 	bfa_status_t status;
2618 	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
2619 
2620 	if (*offset >= smem_len) {
2621 		*offset = *buflen = 0;
2622 		return BFA_STATUS_EINVAL;
2623 	}
2624 
2625 	loff = *offset;
2626 	dlen = *buflen;
2627 
2628 	/*
2629 	 * On the first smem read, sync smem before proceeding;
2630 	 * no need to sync again before each subsequent chunk.
2631 	 */
2632 	if (loff == 0)
2633 		bfa_ioc_fwsync(ioc);
2634 
2635 	if ((loff + dlen) >= smem_len)
2636 		dlen = smem_len - loff;
2637 
2638 	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2639 
2640 	if (status != BFA_STATUS_OK) {
2641 		*offset = *buflen = 0;
2642 		return status;
2643 	}
2644 
2645 	*offset += dlen;
2646 
2647 	if (*offset >= smem_len)
2648 		*offset = 0;
2649 
2650 	*buflen = dlen;
2651 
2652 	return status;
2653 }
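
/*
 * A minimal sketch of walking all of smem with the offset/buflen cursor
 * maintained above; the chunk size and buffer are illustrative
 * assumptions:
 *
 *	u32 off = 0;
 *	int len;
 *
 *	do {
 *		len = chunk_sz;
 *		if (bfa_ioc_debug_fwcore(ioc, buf, &off, &len) !=
 *		    BFA_STATUS_OK)
 *			break;
 *		// consume len bytes from buf
 *	} while (off != 0);	// off wraps to 0 once smem_len is reached
 */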
2654 
2655 /*
2656  * Firmware statistics
2657  */
2658 bfa_status_t
2659 bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2660 {
2661 	u32 loff = BFI_IOC_FWSTATS_OFF +
2662 		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2663 	int tlen;
2664 	bfa_status_t status;
2665 
2666 	if (ioc->stats_busy) {
2667 		bfa_trc(ioc, ioc->stats_busy);
2668 		return BFA_STATUS_DEVBUSY;
2669 	}
2670 	ioc->stats_busy = BFA_TRUE;
2671 
2672 	tlen = sizeof(struct bfa_fw_stats_s);
2673 	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2674 
2675 	ioc->stats_busy = BFA_FALSE;
2676 	return status;
2677 }
2678 
2679 bfa_status_t
2680 bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2681 {
2682 	u32 loff = BFI_IOC_FWSTATS_OFF +
2683 		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2684 	int tlen;
2685 	bfa_status_t status;
2686 
2687 	if (ioc->stats_busy) {
2688 		bfa_trc(ioc, ioc->stats_busy);
2689 		return BFA_STATUS_DEVBUSY;
2690 	}
2691 	ioc->stats_busy = BFA_TRUE;
2692 
2693 	tlen = sizeof(struct bfa_fw_stats_s);
2694 	status = bfa_ioc_smem_clr(ioc, loff, tlen);
2695 
2696 	ioc->stats_busy = BFA_FALSE;
2697 	return status;
2698 }
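
/*
 * A minimal sketch of a snapshot-and-reset statistics cycle using the
 * two routines above:
 *
 *	struct bfa_fw_stats_s stats;
 *
 *	if (bfa_ioc_fw_stats_get(ioc, &stats) == BFA_STATUS_OK) {
 *		// report the interval, then start a fresh one
 *		bfa_ioc_fw_stats_clear(ioc);
 *	}
 */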
2699 
2700 /*
2701  * Save firmware trace if configured.
2702  */
2703 static void
2704 bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
2705 {
2706 	int		tlen;
2707 
2708 	if (ioc->dbg_fwsave_len) {
2709 		tlen = ioc->dbg_fwsave_len;
2710 		bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2711 	}
2712 }
2713 
2714 /*
2715  * Firmware failure detected. Start recovery actions.
2716  */
2717 static void
2718 bfa_ioc_recover(struct bfa_ioc_s *ioc)
2719 {
2720 	if (ioc->dbg_fwsave_once) {
2721 		ioc->dbg_fwsave_once = BFA_FALSE;
2722 		bfa_ioc_debug_save(ioc);
2723 	}
2724 
2725 	bfa_ioc_stats(ioc, ioc_hbfails);
2726 	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2727 }
2728 
2729 static void
2730 bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
2731 {
2732 	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2733 		return;	/* LL IOCs carry no FC WWNs to check */
2734 }
2735 
2736 /*
2737  *  hal_iocpf_pvt BFA IOC PF private functions
2738  */
2739 
2740 static void
2741 bfa_iocpf_enable(struct bfa_ioc_s *ioc)
2742 {
2743 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2744 }
2745 
2746 static void
2747 bfa_iocpf_disable(struct bfa_ioc_s *ioc)
2748 {
2749 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2750 }
2751 
2752 static void
2753 bfa_iocpf_fail(struct bfa_ioc_s *ioc)
2754 {
2755 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2756 }
2757 
2758 static void
2759 bfa_iocpf_initfail(struct bfa_ioc_s *ioc)
2760 {
2761 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2762 }
2763 
2764 static void
2765 bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc)
2766 {
2767 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2768 }
2769 
2770 static void
2771 bfa_iocpf_stop(struct bfa_ioc_s *ioc)
2772 {
2773 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2774 }
2775 
2776 static void
2777 bfa_iocpf_timeout(void *ioc_arg)
2778 {
2779 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
2780 
2781 	bfa_trc(ioc, 0);
2782 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2783 }
2784 
2785 static void
2786 bfa_iocpf_sem_timeout(void *ioc_arg)
2787 {
2788 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
2789 
2790 	bfa_ioc_hw_sem_get(ioc);
2791 }
2792 
2793 /*
2794  *  bfa timer function
2795  */
2796 void
2797 bfa_timer_init(struct bfa_timer_mod_s *mod)
2798 {
2799 	INIT_LIST_HEAD(&mod->timer_q);
2800 }
2801 
2802 void
2803 bfa_timer_beat(struct bfa_timer_mod_s *mod)
2804 {
2805 	struct list_head *qh = &mod->timer_q;
2806 	struct list_head *qe, *qe_next;
2807 	struct bfa_timer_s *elem;
2808 	struct list_head timedout_q;
2809 
2810 	INIT_LIST_HEAD(&timedout_q);
2811 
2812 	qe = bfa_q_next(qh);
2813 
2814 	while (qe != qh) {
2815 		qe_next = bfa_q_next(qe);
2816 
2817 		elem = (struct bfa_timer_s *) qe;
2818 		if (elem->timeout <= BFA_TIMER_FREQ) {
2819 			elem->timeout = 0;
2820 			list_del(&elem->qe);
2821 			list_add_tail(&elem->qe, &timedout_q);
2822 		} else {
2823 			elem->timeout -= BFA_TIMER_FREQ;
2824 		}
2825 
2826 		qe = qe_next;	/* go to next elem */
2827 	}
2828 
2829 	/*
2830 	 * Pop all the timeout entries
2831 	 */
2832 	while (!list_empty(&timedout_q)) {
2833 		bfa_q_deq(&timedout_q, &elem);
2834 		elem->timercb(elem->arg);
2835 	}
2836 }
2837 
2838 /*
2839  * Should be called with lock protection
2840  */
2841 void
2842 bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2843 		    void (*timercb) (void *), void *arg, unsigned int timeout)
2844 {
2846 	bfa_assert(timercb != NULL);
2847 	bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));
2848 
2849 	timer->timeout = timeout;
2850 	timer->timercb = timercb;
2851 	timer->arg = arg;
2852 
2853 	list_add_tail(&timer->qe, &mod->timer_q);
2854 }
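
/*
 * A minimal sketch of driving this soft-timer module; the host timer
 * that beats every BFA_TIMER_FREQ msecs and the lock are the driver's
 * responsibility, and the bfad names are illustrative:
 *
 *	spin_lock_irqsave(&bfad->bfad_lock, flags);
 *	bfa_timer_begin(&bfad->timer_mod, &my_timer, my_timeout_cb,
 *			my_arg, 2000);		// fires in ~2000 msecs
 *	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 *
 * and, from the periodic host timer callback:
 *
 *	spin_lock_irqsave(&bfad->bfad_lock, flags);
 *	bfa_timer_beat(&bfad->timer_mod);
 *	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 */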
2855 
2856 /*
2857  * Should be called with lock protection
2858  */
2859 void
2860 bfa_timer_stop(struct bfa_timer_s *timer)
2861 {
2862 	bfa_assert(!list_empty(&timer->qe));
2863 
2864 	list_del(&timer->qe);
2865 }
2866