/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <bfa.h>
#include <bfa_ioc.h>
#include <bfa_fwimg_priv.h>
#include <cna/bfa_cna_trcmod.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_ioc.h>
#include <bfi/bfi_ctreg.h>
#include <aen/bfa_aen_ioc.h>
#include <aen/bfa_aen.h>
#include <log/bfa_log_hal.h>
#include <defs/bfa_defs_pci.h>

BFA_TRC_FILE(CNA, IOC);

/**
 * IOC local definitions
 */
#define BFA_IOC_TOV		2000	/* msecs */
#define BFA_IOC_HWSEM_TOV       500     /* msecs */
#define BFA_IOC_HB_TOV          500     /* msecs */
#define BFA_IOC_HWINIT_MAX      2
#define BFA_IOC_FWIMG_MINSZ     (16 * 1024)
#define BFA_IOC_TOV_RECOVER      BFA_IOC_HB_TOV

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)

#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
#define BFA_DBG_FWTRC_LEN					\
	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
	 (sizeof(struct bfa_trc_mod_s) -			\
	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

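/*
 * Note: BFA_DBG_FWTRC_LEN sizes one per-function firmware trace buffer:
 * BFA_DBG_FWTRC_ENTS trace entries plus the bfa_trc_mod_s header. Since
 * bfa_trc_mod_s statically embeds BFA_TRC_MAX entries, that array is
 * subtracted back out to leave the bare header size.
 */
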
/**
 * ASIC specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)                    \
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)                  \
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc)                    \
			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
#define bfa_ioc_is_optrom(__ioc)        \
	(bfi_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)

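/*
 * Heuristic used by the download path (see bfa_ioc_download_fw() below):
 * when less than BFA_IOC_FWIMG_MINSZ of firmware is bundled for this
 * IOC, the full image is assumed to reside in adapter flash and the
 * boot type is forced to BFI_BOOT_TYPE_FLASH.
 */
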
bfa_boolean_t   bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void     bfa_ioc_aen_post(struct bfa_ioc_s *ioc,
				 enum bfa_ioc_aen_event event);
static void     bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void     bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
static void     bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void     bfa_ioc_timeout(void *ioc);
static void     bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void     bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void     bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void     bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void     bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
static void     bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void     bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void     bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void     bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void     bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void     bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);

/**
 *  bfa_ioc_sm
 */

/**
 * IOC state machine events
 */
enum ioc_event {
	IOC_E_ENABLE = 1,	/*  IOC enable request */
	IOC_E_DISABLE = 2,	/*  IOC disable request */
	IOC_E_TIMEOUT = 3,	/*  f/w response timeout */
	IOC_E_FWREADY = 4,	/*  f/w initialization done */
	IOC_E_FWRSP_GETATTR = 5,	/*  IOC get attribute response */
	IOC_E_FWRSP_ENABLE = 6,	/*  enable f/w response */
	IOC_E_FWRSP_DISABLE = 7,	/*  disable f/w response */
	IOC_E_HBFAIL = 8,	/*  heartbeat failure */
	IOC_E_HWERROR = 9,	/*  hardware error interrupt */
	IOC_E_SEMLOCKED = 10,	/*  h/w semaphore is locked */
	IOC_E_DETACH = 11,	/*  driver detach cleanup */
};

bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};

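/*
 * This table maps each state handler to the externally visible
 * bfa_ioc_state value; bfa_ioc_get_state() below resolves the current
 * handler through bfa_sm_to_state() against this table.
 */
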
/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	ioc->retry_count = 0;
	ioc->auto_recover = bfa_auto_recover;
}

/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			ioc->retry_count = 0;
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		} else {
			bfa_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
		}
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Notify enable completion callback and generate mismatch AEN.
 */
static void
bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification only once.
	 */
	if (ioc->retry_count == 0) {
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
	}
	ioc->retry_count++;
	bfa_ioc_timer_start(ioc);
}

/**
 * Awaiting firmware version match.
 */
static void
bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Request for semaphore.
 */
static void
bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		ioc->retry_count = 0;
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_reset(ioc, BFA_FALSE);
}

/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWREADY:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_ioc_timer_start(ioc);
			bfa_ioc_reset(ioc, BFA_TRUE);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_release(ioc);
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_ENABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
				      BFI_IOC_UNINIT);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
	case IOC_E_FWREADY:
		/**
		 * Hard error, or IOC recovery by the other function.
		 * Treat it the same as a heartbeat failure.
		 */
		bfa_ioc_hb_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_TIMEOUT:
		bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_FWREADY:
		break;

	case IOC_E_DETACH:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	bfa_ioc_timer_start(ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(ioc);
	bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);

	/**
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(ioc);

	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);

	/**
	 * Trigger auto-recovery after a delay.
	 */
	if (ioc->auto_recover) {
		bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer,
				bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER);
	}
}

/**
 * IOC heartbeat failure.
 */
static void
bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		if (ioc->auto_recover)
			bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_FWREADY:
		/**
		 * Recovery is already initiated by the other function.
		 */
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


/**
 *  bfa_ioc_pvt BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/**
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}
}

void
bfa_ioc_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

bfa_boolean_t
bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT 3000

	r32 = bfa_reg_read(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		bfa_os_udelay(2);
		r32 = bfa_reg_read(sem_reg);
	}

	if (r32 == 0)
		return BFA_TRUE;

	bfa_assert(cnt < BFA_SEM_SPINCNT);
	return BFA_FALSE;
}

void
bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
{
	bfa_reg_write(sem_reg, 1);
}
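
/*
 * Typical get/release pairing (illustrative sketch only;
 * bfa_ioc_debug_fwtrc() below is an in-file user of this pattern):
 *
 *	if (bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
 *		... serialized section (e.g. PLL init, fwtrc read) ...
 *		bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
 *	}
 */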

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32        r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
		return;
	}

	bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
			ioc, BFA_IOC_HWSEM_TOV);
}

void
bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
{
	bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->sem_timer);
}

/**
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32        pss_ctl;
	int             i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5khz clock */
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32        pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32        pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

/**
 * Fetch the running firmware's image header (version signature) from
 * adapter SMEM.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32        pgnum, pgoff;
	u32        loff = 0;
	int             i;
	u32       *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}

/**
 * Returns TRUE if the given firmware's md5sum matches the driver's
 * bundled image.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int             i;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
			bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}

/**
 * Return true if the currently running firmware version is valid.
 * Firmware signature and execution context (driver/bios) must match.
 */
static          bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	/**
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_is_optrom(ioc))
		return BFA_TRUE;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
			bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (fwhdr.exec != drv_fwhdr->exec) {
		bfa_trc(ioc, fwhdr.exec);
		bfa_trc(ioc, drv_fwhdr->exec);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32        r32;

	r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
}

static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t   fwvalid;

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
			BFA_FALSE : bfa_ioc_fwver_valid(ioc);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by the other
	 * IOC), just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_trc(ioc, ioc_fwstate);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/**
	 * If the IOC function is disabled and the firmware version matches,
	 * just re-enable the IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		bfa_trc(ioc, ioc_fwstate);

		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}

static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32       *msgp = (u32 *) ioc_msg;
	u32        i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
			      bfa_os_wtole(msgp[i]));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
	(void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
}

static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

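/**
 * Heartbeat check timer callback. Firmware advances the heartbeat
 * register every interval; if the count has not moved since the last
 * sample, the IOC is declared failed and recovery is started. Mailbox
 * polling piggybacks on the same timer.
 */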
static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s  *ioc = cbarg;
	u32     hb_count;

	hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE,
			hb_count);
		bfa_ioc_recover(ioc);
		return;
	}
	ioc->hb_count = hb_count;

	bfa_ioc_mbox_poll(ioc);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check,
			ioc, BFA_IOC_HB_TOV);
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
			BFA_IOC_HB_TOV);
}

static void
bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->ioc_timer);
}

/**
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_param)
{
	u32       *fwimg;
	u32        pgnum, pgoff;
	u32        loff = 0;
	u32        chunkno = 0;
	u32        i;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	/**
	 * Flash based firmware boot
	 */
	bfa_trc(ioc, bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
	if (bfa_ioc_is_optrom(ioc))
		boot_type = BFI_BOOT_TYPE_FLASH;
	fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 * Set boot type and boot param at the end.
	 */
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
			bfa_os_swap32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF,
			bfa_os_swap32(boot_param));
}

static void
bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	bfa_ioc_hwinit(ioc, force);
}

/**
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s *attr = ioc->attr;

	attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
	attr->card_type     = bfa_os_ntohl(attr->card_type);
	attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/**
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	int             mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;
	u32        stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/**
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

/**
 *  bfa_ioc_public
 */

/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
{
	bfa_os_addr_t   rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
	} else {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
	}

	bfa_ioc_download_fw(ioc, boot_type, boot_param);

	/**
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}

/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}


bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	u32       *msgp = mbmsg;
	u32        r32;
	int             i;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
				   i * sizeof(u32));
		msgp[i] = bfa_os_htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
	bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
}

void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u *msg;

	msg = (union bfi_ioc_i2h_msg_u *)m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		bfa_assert(0);
	}
}

/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 * @param[in]	cbfn	IOC event callback functions
 * @param[in]	timer_mod	timer module
 * @param[in]	trcmod	kernel trace module
 * @param[in]	aen	kernel aen event module
 * @param[in]	logm	kernel logging module
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod,
	       struct bfa_aen_s *aen, struct bfa_log_mod_s *logm)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->timer_mod = timer_mod;
	ioc->trcmod = trcmod;
	ioc->aen = aen;
	ioc->logm = logm;
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
}
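
/*
 * Illustrative bring-up sequence (hypothetical caller and argument
 * names -- real drivers wire these from their own module state):
 *
 *	bfa_ioc_attach(ioc, bfa, &my_cbfn, timer_mod, trcmod, aen, logm);
 *	bfa_ioc_pci_init(ioc, pcidev, BFI_MC_IOCFC);
 *	bfa_ioc_mem_claim(ioc, dm_kva, dm_pa);
 *	bfa_ioc_enable(ioc);
 */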

/**
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}

/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc = mc;
	ioc->pcidev = *pcidev;
	ioc->ctdev  = bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna = ioc->ctdev && !ioc->fcmode;

	/**
	 * Set ASIC specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->ctdev)
		bfa_ioc_set_ct_hwif(ioc);
	else
		bfa_ioc_set_cb_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}

/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *)dm_kva;
}

/**
 * Return size of dma memory required.
 */
u32
bfa_ioc_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
}

void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

/**
 * Returns memory required for saving firmware trace in case of crash.
 * Driver must call this interface to allocate memory required for
 * automatic saving of firmware trace. Driver should call
 * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this
 * trace memory.
 */
int
bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
{
	return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}

/**
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
}
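
/*
 * Illustrative trace-save setup (hypothetical caller; kzalloc is just
 * an example allocator):
 *
 *	int trclen = bfa_ioc_debug_trcsz(bfa_auto_recover);
 *	void *fwsave = trclen ? kzalloc(trclen, GFP_KERNEL) : NULL;
 *
 *	bfa_ioc_debug_memclaim(ioc, fwsave);
 */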

u32
bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

u32
bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGOFF(fmaddr);
}

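/*
 * SMEM is accessed through a sliding host page window: pgnum selects
 * the window (written to the host_page_num_fn register) and pgoff is
 * the offset within it. Readers and writers above bump pgnum whenever
 * PSS_SMEM_PGOFF(loff) wraps back to zero.
 */
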
/**
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	int             mc;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}

/**
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}
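
/*
 * Example registration (hypothetical handler and module names):
 *
 *	bfa_ioc_mbox_regisr(ioc, BFI_MC_LL, my_mod_isr, my_mod);
 *
 * bfa_ioc_mbox_isr() below then dispatches every received message of
 * that class to my_mod_isr(my_mod, &msg).
 */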

/**
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * It is the caller's responsibility to serialize requests.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	u32        stat;

	/**
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

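/*
 * Typical usage sketch (hypothetical module state; real callers embed
 * struct bfa_mbox_cmd_s in their own structures so it can sit on the
 * pending queue):
 *
 *	struct bfa_mbox_cmd_s *cmd = &my_mod->mb_cmd;
 *	struct bfi_ioc_ctrl_req_s *req = (void *)cmd->msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, cmd);
 */
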
/**
 * Handle mailbox interrupts
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s m;
	int             mc;

	bfa_ioc_msgget(ioc, &m);

	/**
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);
		return;
	}

	if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}

void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode  = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}

#ifndef BFA_BIOS_BUILD

/**
 * return true if IOC is disabled
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/**
 * return true if IOC firmware is different.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch);
}

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))

/**
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32        ioc_state;
	bfa_os_addr_t   rb = ioc->pcidev.pci_bar_kva;

	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	return BFA_TRUE;
}

/**
 * Add to the IOC heartbeat failure notification queue. To be used by
 * common modules.
 */
void
bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
			struct bfa_ioc_hbfail_notify_s *notify)
{
	list_add_tail(&notify->qe, &ioc->hb_notify_q);
}

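/*
 * Registration sketch (hypothetical module): fill in the notify
 * element's cbfn/cbarg, then queue it:
 *
 *	my_mod->hbfail.cbfn = my_mod_hbfail_isr;
 *	my_mod->hbfail.cbarg = my_mod;
 *	bfa_ioc_hbfail_register(ioc, &my_mod->hbfail);
 */
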
#define BFA_MFG_NAME "Brocade"
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
}

enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
	else {
		bfa_assert(ioc->ioc_mc == BFI_MC_LL);
		return BFA_IOC_TYPE_LL;
	}
}

void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
	bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	bfa_os_memcpy((void *)serial_num,
			(void *)ioc->attr->brcd_serialnum,
			BFA_ADAPTER_SERIAL_NUM_LEN);
}

void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
	bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
	bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
	bfa_assert(chip_rev);

	bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
	bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
	bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
		BFA_VERSION_LEN);
}

void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
	bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
	struct bfi_ioc_attr_s   *ioc_attr;
	u8              nports;
	u8              max_speed;

	bfa_assert(model);
	bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	nports = bfa_ioc_get_nports(ioc);
	max_speed = bfa_ioc_speed_sup(ioc);

	/**
	 * model name
	 */
	if (max_speed == 10) {
		strcpy(model, "BR-10?0");
		model[5] = '0' + nports;
	} else {
		strcpy(model, "Brocade-??5");
		model[8] = '0' + max_speed;
		model[9] = '0' + nports;
	}
}

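/*
 * Example of the substitution above: a 2-port 10G adapter yields
 * "BR-1020"; a 2-port 8G adapter yields "Brocade-825" (speed digit at
 * index 8, port-count digit at index 9).
 */
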
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
}

void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

/**
 *  hal_wwn_public
 */
wwn_t
bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
{
	union {
		wwn_t           wwn;
		u8         byte[sizeof(wwn_t)];
	} w;

	w.wwn = ioc->attr->mfg_wwn;

	if (bfa_ioc_portid(ioc) == 1)
		w.byte[7]++;

	return w.wwn;
}

wwn_t
bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
{
	union {
		wwn_t           wwn;
		u8         byte[sizeof(wwn_t)];
	} w;

	w.wwn = ioc->attr->mfg_wwn;

	if (bfa_ioc_portid(ioc) == 1)
		w.byte[7]++;

	w.byte[0] = 0x20;

	return w.wwn;
}

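/*
 * Derivation note: both WWNs start from the manufacturing WWN. Port 1
 * increments the low-order byte, and the node WWN additionally forces
 * the leading byte to 0x20 (NAA IEEE Extended format).
 */
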
u64
bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_wwn;
}

mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
	mac_t           mac;

	mac = ioc->attr->mfg_mac;
	mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);

	return mac;
}

bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}

/**
 * Send AEN notification
 */
static void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
	union bfa_aen_data_u aen_data;
	struct bfa_log_mod_s *logmod = ioc->logm;
	s32         inst_num = 0;
	enum bfa_ioc_type_e ioc_type;

	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num);

	memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
	memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
	ioc_type = bfa_ioc_get_type(ioc);
	switch (ioc_type) {
	case BFA_IOC_TYPE_FC:
		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
		break;
	case BFA_IOC_TYPE_FCoE:
		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	case BFA_IOC_TYPE_LL:
		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	default:
		bfa_assert(ioc_type == BFA_IOC_TYPE_FC);
		break;
	}
	aen_data.ioc.ioc_type = ioc_type;
}

/**
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int             tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}

/**
 * Clear saved firmware trace (re-arms the one-shot auto-save that runs
 * on the next IOC failure).
 */
void
bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
{
	ioc->dbg_fwsave_once = BFA_TRUE;
}

/**
 * Read the current firmware trace directly from adapter memory (SMEM).
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	u32        pgnum;
	u32        loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int             i, tlen;
	u32       *tbuf = trcdata, r32;

	bfa_trc(ioc, *trclen);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	loff = bfa_ioc_smem_pgoff(ioc, loff);

	/*
	 *  Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
		return BFA_STATUS_FAILED;

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	tlen = *trclen;
	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;
	tlen /= sizeof(u32);

	bfa_trc(ioc, tlen);

	for (i = 0; i < tlen; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		tbuf[i] = bfa_os_ntohl(r32);
		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 *  release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);

	*trclen = tlen * sizeof(u32);
	return BFA_STATUS_OK;
}

/**
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
{
	int             tlen;

	if (ioc->dbg_fwsave_len) {
		tlen = ioc->dbg_fwsave_len;
		bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
	}
}

/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		bfa_ioc_debug_save(ioc);
	}

	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

#else

static void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
}

static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	bfa_assert(0);
}

#endif
2069