1 /* 2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 3 * All rights reserved 4 * www.brocade.com 5 * 6 * Linux driver for Brocade Fibre Channel Host Bus Adapter. 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License (GPL) Version 2 as 10 * published by the Free Software Foundation 11 * 12 * This program is distributed in the hope that it will be useful, but 13 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * General Public License for more details. 16 */ 17 18 #include <bfa.h> 19 #include <bfa_ioc.h> 20 #include <bfa_fwimg_priv.h> 21 #include <cna/bfa_cna_trcmod.h> 22 #include <cs/bfa_debug.h> 23 #include <bfi/bfi_ioc.h> 24 #include <bfi/bfi_ctreg.h> 25 #include <aen/bfa_aen_ioc.h> 26 #include <aen/bfa_aen.h> 27 #include <log/bfa_log_hal.h> 28 #include <defs/bfa_defs_pci.h> 29 30 BFA_TRC_FILE(CNA, IOC); 31 32 /** 33 * IOC local definitions 34 */ 35 #define BFA_IOC_TOV 2000 /* msecs */ 36 #define BFA_IOC_HWSEM_TOV 500 /* msecs */ 37 #define BFA_IOC_HB_TOV 500 /* msecs */ 38 #define BFA_IOC_HWINIT_MAX 2 39 #define BFA_IOC_FWIMG_MINSZ (16 * 1024) 40 #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV 41 42 #define bfa_ioc_timer_start(__ioc) \ 43 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ 44 bfa_ioc_timeout, (__ioc), BFA_IOC_TOV) 45 #define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer) 46 47 #define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS) 48 #define BFA_DBG_FWTRC_LEN \ 49 (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \ 50 (sizeof(struct bfa_trc_mod_s) - \ 51 BFA_TRC_MAX * sizeof(struct bfa_trc_s))) 52 #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) 53 54 /** 55 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 
56 */ 57 58 #define bfa_ioc_firmware_lock(__ioc) \ 59 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) 60 #define bfa_ioc_firmware_unlock(__ioc) \ 61 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) 62 #define bfa_ioc_fwimg_get_chunk(__ioc, __off) \ 63 ((__ioc)->ioc_hwif->ioc_fwimg_get_chunk(__ioc, __off)) 64 #define bfa_ioc_fwimg_get_size(__ioc) \ 65 ((__ioc)->ioc_hwif->ioc_fwimg_get_size(__ioc)) 66 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) 67 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) 68 #define bfa_ioc_notify_hbfail(__ioc) \ 69 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) 70 71 bfa_boolean_t bfa_auto_recover = BFA_TRUE; 72 73 /* 74 * forward declarations 75 */ 76 static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa, 77 enum bfa_ioc_aen_event event); 78 static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); 79 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); 80 static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); 81 static void bfa_ioc_timeout(void *ioc); 82 static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc); 83 static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc); 84 static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc); 85 static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); 86 static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc); 87 static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force); 88 static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); 89 static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); 90 static void bfa_ioc_recover(struct bfa_ioc_s *ioc); 91 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); 92 static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); 93 94 /** 95 * bfa_ioc_sm 96 */ 97 98 /** 99 * IOC state machine events 100 */ 101 enum ioc_event { 102 IOC_E_ENABLE = 1, /* IOC enable request */ 103 IOC_E_DISABLE = 2, /* IOC disable request */ 104 IOC_E_TIMEOUT = 3, /* f/w response timeout */ 105 IOC_E_FWREADY = 4, /* f/w 
initialization done */ 106 IOC_E_FWRSP_GETATTR = 5, /* IOC get attribute response */ 107 IOC_E_FWRSP_ENABLE = 6, /* enable f/w response */ 108 IOC_E_FWRSP_DISABLE = 7, /* disable f/w response */ 109 IOC_E_HBFAIL = 8, /* heartbeat failure */ 110 IOC_E_HWERROR = 9, /* hardware error interrupt */ 111 IOC_E_SEMLOCKED = 10, /* h/w semaphore is locked */ 112 IOC_E_DETACH = 11, /* driver detach cleanup */ 113 }; 114 115 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event); 116 bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event); 117 bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event); 118 bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event); 119 bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event); 120 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event); 121 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event); 122 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event); 123 bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event); 124 bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event); 125 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); 126 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); 127 128 static struct bfa_sm_table_s ioc_sm_table[] = { 129 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, 130 {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH}, 131 {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH}, 132 {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT}, 133 {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT}, 134 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT}, 135 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, 136 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, 137 {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL}, 138 {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL}, 139 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, 140 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, 141 
}; 142 143 /** 144 * Reset entry actions -- initialize state machine 145 */ 146 static void 147 bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc) 148 { 149 ioc->retry_count = 0; 150 ioc->auto_recover = bfa_auto_recover; 151 } 152 153 /** 154 * Beginning state. IOC is in reset state. 155 */ 156 static void 157 bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) 158 { 159 bfa_trc(ioc, event); 160 161 switch (event) { 162 case IOC_E_ENABLE: 163 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); 164 break; 165 166 case IOC_E_DISABLE: 167 bfa_ioc_disable_comp(ioc); 168 break; 169 170 case IOC_E_DETACH: 171 break; 172 173 default: 174 bfa_sm_fault(ioc, event); 175 } 176 } 177 178 /** 179 * Semaphore should be acquired for version check. 180 */ 181 static void 182 bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc) 183 { 184 bfa_ioc_hw_sem_get(ioc); 185 } 186 187 /** 188 * Awaiting h/w semaphore to continue with version check. 189 */ 190 static void 191 bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event) 192 { 193 bfa_trc(ioc, event); 194 195 switch (event) { 196 case IOC_E_SEMLOCKED: 197 if (bfa_ioc_firmware_lock(ioc)) { 198 ioc->retry_count = 0; 199 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); 200 } else { 201 bfa_ioc_hw_sem_release(ioc); 202 bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch); 203 } 204 break; 205 206 case IOC_E_DISABLE: 207 bfa_ioc_disable_comp(ioc); 208 /* 209 * fall through 210 */ 211 212 case IOC_E_DETACH: 213 bfa_ioc_hw_sem_get_cancel(ioc); 214 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 215 break; 216 217 case IOC_E_FWREADY: 218 break; 219 220 default: 221 bfa_sm_fault(ioc, event); 222 } 223 } 224 225 /** 226 * Notify enable completion callback and generate mismatch AEN. 227 */ 228 static void 229 bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc) 230 { 231 /** 232 * Provide enable completion callback and AEN notification only once. 
233 */ 234 if (ioc->retry_count == 0) { 235 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 236 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH); 237 } 238 ioc->retry_count++; 239 bfa_ioc_timer_start(ioc); 240 } 241 242 /** 243 * Awaiting firmware version match. 244 */ 245 static void 246 bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event) 247 { 248 bfa_trc(ioc, event); 249 250 switch (event) { 251 case IOC_E_TIMEOUT: 252 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); 253 break; 254 255 case IOC_E_DISABLE: 256 bfa_ioc_disable_comp(ioc); 257 /* 258 * fall through 259 */ 260 261 case IOC_E_DETACH: 262 bfa_ioc_timer_stop(ioc); 263 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 264 break; 265 266 case IOC_E_FWREADY: 267 break; 268 269 default: 270 bfa_sm_fault(ioc, event); 271 } 272 } 273 274 /** 275 * Request for semaphore. 276 */ 277 static void 278 bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc) 279 { 280 bfa_ioc_hw_sem_get(ioc); 281 } 282 283 /** 284 * Awaiting semaphore for h/w initialzation. 285 */ 286 static void 287 bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event) 288 { 289 bfa_trc(ioc, event); 290 291 switch (event) { 292 case IOC_E_SEMLOCKED: 293 ioc->retry_count = 0; 294 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); 295 break; 296 297 case IOC_E_DISABLE: 298 bfa_ioc_hw_sem_get_cancel(ioc); 299 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 300 break; 301 302 default: 303 bfa_sm_fault(ioc, event); 304 } 305 } 306 307 308 static void 309 bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc) 310 { 311 bfa_ioc_timer_start(ioc); 312 bfa_ioc_reset(ioc, BFA_FALSE); 313 } 314 315 /** 316 * Hardware is being initialized. Interrupts are enabled. 317 * Holding hardware semaphore lock. 
318 */ 319 static void 320 bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event) 321 { 322 bfa_trc(ioc, event); 323 324 switch (event) { 325 case IOC_E_FWREADY: 326 bfa_ioc_timer_stop(ioc); 327 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); 328 break; 329 330 case IOC_E_HWERROR: 331 bfa_ioc_timer_stop(ioc); 332 /* 333 * fall through 334 */ 335 336 case IOC_E_TIMEOUT: 337 ioc->retry_count++; 338 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { 339 bfa_ioc_timer_start(ioc); 340 bfa_ioc_reset(ioc, BFA_TRUE); 341 break; 342 } 343 344 bfa_ioc_hw_sem_release(ioc); 345 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 346 break; 347 348 case IOC_E_DISABLE: 349 bfa_ioc_hw_sem_release(ioc); 350 bfa_ioc_timer_stop(ioc); 351 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 352 break; 353 354 default: 355 bfa_sm_fault(ioc, event); 356 } 357 } 358 359 360 static void 361 bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc) 362 { 363 bfa_ioc_timer_start(ioc); 364 bfa_ioc_send_enable(ioc); 365 } 366 367 /** 368 * Host IOC function is being enabled, awaiting response from firmware. 369 * Semaphore is acquired. 
370 */ 371 static void 372 bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) 373 { 374 bfa_trc(ioc, event); 375 376 switch (event) { 377 case IOC_E_FWRSP_ENABLE: 378 bfa_ioc_timer_stop(ioc); 379 bfa_ioc_hw_sem_release(ioc); 380 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); 381 break; 382 383 case IOC_E_HWERROR: 384 bfa_ioc_timer_stop(ioc); 385 /* 386 * fall through 387 */ 388 389 case IOC_E_TIMEOUT: 390 ioc->retry_count++; 391 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { 392 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, 393 BFI_IOC_UNINIT); 394 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); 395 break; 396 } 397 398 bfa_ioc_hw_sem_release(ioc); 399 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 400 break; 401 402 case IOC_E_DISABLE: 403 bfa_ioc_timer_stop(ioc); 404 bfa_ioc_hw_sem_release(ioc); 405 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 406 break; 407 408 case IOC_E_FWREADY: 409 bfa_ioc_send_enable(ioc); 410 break; 411 412 default: 413 bfa_sm_fault(ioc, event); 414 } 415 } 416 417 418 static void 419 bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc) 420 { 421 bfa_ioc_timer_start(ioc); 422 bfa_ioc_send_getattr(ioc); 423 } 424 425 /** 426 * IOC configuration in progress. Timer is active. 
427 */ 428 static void 429 bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) 430 { 431 bfa_trc(ioc, event); 432 433 switch (event) { 434 case IOC_E_FWRSP_GETATTR: 435 bfa_ioc_timer_stop(ioc); 436 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 437 break; 438 439 case IOC_E_HWERROR: 440 bfa_ioc_timer_stop(ioc); 441 /* 442 * fall through 443 */ 444 445 case IOC_E_TIMEOUT: 446 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 447 break; 448 449 case IOC_E_DISABLE: 450 bfa_ioc_timer_stop(ioc); 451 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 452 break; 453 454 default: 455 bfa_sm_fault(ioc, event); 456 } 457 } 458 459 460 static void 461 bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) 462 { 463 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); 464 bfa_ioc_hb_monitor(ioc); 465 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE); 466 } 467 468 static void 469 bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event) 470 { 471 bfa_trc(ioc, event); 472 473 switch (event) { 474 case IOC_E_ENABLE: 475 break; 476 477 case IOC_E_DISABLE: 478 bfa_ioc_hb_stop(ioc); 479 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 480 break; 481 482 case IOC_E_HWERROR: 483 case IOC_E_FWREADY: 484 /** 485 * Hard error or IOC recovery by other function. 486 * Treat it same as heartbeat failure. 487 */ 488 bfa_ioc_hb_stop(ioc); 489 /* 490 * !!! fall through !!! 
491 */ 492 493 case IOC_E_HBFAIL: 494 bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail); 495 break; 496 497 default: 498 bfa_sm_fault(ioc, event); 499 } 500 } 501 502 503 static void 504 bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc) 505 { 506 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE); 507 bfa_ioc_timer_start(ioc); 508 bfa_ioc_send_disable(ioc); 509 } 510 511 /** 512 * IOC is being disabled 513 */ 514 static void 515 bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) 516 { 517 bfa_trc(ioc, event); 518 519 switch (event) { 520 case IOC_E_FWRSP_DISABLE: 521 bfa_ioc_timer_stop(ioc); 522 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 523 break; 524 525 case IOC_E_HWERROR: 526 bfa_ioc_timer_stop(ioc); 527 /* 528 * !!! fall through !!! 529 */ 530 531 case IOC_E_TIMEOUT: 532 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); 533 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 534 break; 535 536 default: 537 bfa_sm_fault(ioc, event); 538 } 539 } 540 541 /** 542 * IOC disable completion entry. 543 */ 544 static void 545 bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc) 546 { 547 bfa_ioc_disable_comp(ioc); 548 } 549 550 static void 551 bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event) 552 { 553 bfa_trc(ioc, event); 554 555 switch (event) { 556 case IOC_E_ENABLE: 557 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); 558 break; 559 560 case IOC_E_DISABLE: 561 ioc->cbfn->disable_cbfn(ioc->bfa); 562 break; 563 564 case IOC_E_FWREADY: 565 break; 566 567 case IOC_E_DETACH: 568 bfa_ioc_firmware_unlock(ioc); 569 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 570 break; 571 572 default: 573 bfa_sm_fault(ioc, event); 574 } 575 } 576 577 578 static void 579 bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc) 580 { 581 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 582 bfa_ioc_timer_start(ioc); 583 } 584 585 /** 586 * Hardware initialization failed. 
587 */ 588 static void 589 bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) 590 { 591 bfa_trc(ioc, event); 592 593 switch (event) { 594 case IOC_E_DISABLE: 595 bfa_ioc_timer_stop(ioc); 596 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 597 break; 598 599 case IOC_E_DETACH: 600 bfa_ioc_timer_stop(ioc); 601 bfa_ioc_firmware_unlock(ioc); 602 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 603 break; 604 605 case IOC_E_TIMEOUT: 606 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); 607 break; 608 609 default: 610 bfa_sm_fault(ioc, event); 611 } 612 } 613 614 615 static void 616 bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc) 617 { 618 struct list_head *qe; 619 struct bfa_ioc_hbfail_notify_s *notify; 620 621 /** 622 * Mark IOC as failed in hardware and stop firmware. 623 */ 624 bfa_ioc_lpu_stop(ioc); 625 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); 626 627 /** 628 * Notify other functions on HB failure. 629 */ 630 bfa_ioc_notify_hbfail(ioc); 631 632 /** 633 * Notify driver and common modules registered for notification. 634 */ 635 ioc->cbfn->hbfail_cbfn(ioc->bfa); 636 list_for_each(qe, &ioc->hb_notify_q) { 637 notify = (struct bfa_ioc_hbfail_notify_s *)qe; 638 notify->cbfn(notify->cbarg); 639 } 640 641 /** 642 * Flush any queued up mailbox requests. 643 */ 644 bfa_ioc_mbox_hbfail(ioc); 645 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL); 646 647 /** 648 * Trigger auto-recovery after a delay. 649 */ 650 if (ioc->auto_recover) { 651 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, 652 bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER); 653 } 654 } 655 656 /** 657 * IOC heartbeat failure. 
658 */ 659 static void 660 bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event) 661 { 662 bfa_trc(ioc, event); 663 664 switch (event) { 665 666 case IOC_E_ENABLE: 667 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 668 break; 669 670 case IOC_E_DISABLE: 671 if (ioc->auto_recover) 672 bfa_ioc_timer_stop(ioc); 673 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 674 break; 675 676 case IOC_E_TIMEOUT: 677 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); 678 break; 679 680 case IOC_E_FWREADY: 681 /** 682 * Recovery is already initiated by other function. 683 */ 684 break; 685 686 case IOC_E_HWERROR: 687 /* 688 * HB failure notification, ignore. 689 */ 690 break; 691 692 default: 693 bfa_sm_fault(ioc, event); 694 } 695 } 696 697 698 699 /** 700 * bfa_ioc_pvt BFA IOC private functions 701 */ 702 703 static void 704 bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) 705 { 706 struct list_head *qe; 707 struct bfa_ioc_hbfail_notify_s *notify; 708 709 ioc->cbfn->disable_cbfn(ioc->bfa); 710 711 /** 712 * Notify common modules registered for notification. 
713 */ 714 list_for_each(qe, &ioc->hb_notify_q) { 715 notify = (struct bfa_ioc_hbfail_notify_s *)qe; 716 notify->cbfn(notify->cbarg); 717 } 718 } 719 720 void 721 bfa_ioc_sem_timeout(void *ioc_arg) 722 { 723 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; 724 725 bfa_ioc_hw_sem_get(ioc); 726 } 727 728 bfa_boolean_t 729 bfa_ioc_sem_get(bfa_os_addr_t sem_reg) 730 { 731 u32 r32; 732 int cnt = 0; 733 #define BFA_SEM_SPINCNT 3000 734 735 r32 = bfa_reg_read(sem_reg); 736 737 while (r32 && (cnt < BFA_SEM_SPINCNT)) { 738 cnt++; 739 bfa_os_udelay(2); 740 r32 = bfa_reg_read(sem_reg); 741 } 742 743 if (r32 == 0) 744 return BFA_TRUE; 745 746 bfa_assert(cnt < BFA_SEM_SPINCNT); 747 return BFA_FALSE; 748 } 749 750 void 751 bfa_ioc_sem_release(bfa_os_addr_t sem_reg) 752 { 753 bfa_reg_write(sem_reg, 1); 754 } 755 756 static void 757 bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) 758 { 759 u32 r32; 760 761 /** 762 * First read to the semaphore register will return 0, subsequent reads 763 * will return 1. 
Semaphore is released by writing 1 to the register 764 */ 765 r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); 766 if (r32 == 0) { 767 bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED); 768 return; 769 } 770 771 bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout, 772 ioc, BFA_IOC_HWSEM_TOV); 773 } 774 775 void 776 bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc) 777 { 778 bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1); 779 } 780 781 static void 782 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc) 783 { 784 bfa_timer_stop(&ioc->sem_timer); 785 } 786 787 /** 788 * Initialize LPU local memory (aka secondary memory / SRAM) 789 */ 790 static void 791 bfa_ioc_lmem_init(struct bfa_ioc_s *ioc) 792 { 793 u32 pss_ctl; 794 int i; 795 #define PSS_LMEM_INIT_TIME 10000 796 797 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); 798 pss_ctl &= ~__PSS_LMEM_RESET; 799 pss_ctl |= __PSS_LMEM_INIT_EN; 800 pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5khz clock */ 801 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); 802 803 /** 804 * wait for memory initialization to be complete 805 */ 806 i = 0; 807 do { 808 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); 809 i++; 810 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME)); 811 812 /** 813 * If memory initialization is not successful, IOC timeout will catch 814 * such failures. 815 */ 816 bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE); 817 bfa_trc(ioc, pss_ctl); 818 819 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN); 820 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); 821 } 822 823 static void 824 bfa_ioc_lpu_start(struct bfa_ioc_s *ioc) 825 { 826 u32 pss_ctl; 827 828 /** 829 * Take processor out of reset. 830 */ 831 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); 832 pss_ctl &= ~__PSS_LPU0_RESET; 833 834 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); 835 } 836 837 static void 838 bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc) 839 { 840 u32 pss_ctl; 841 842 /** 843 * Put processors in reset. 
844 */ 845 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); 846 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET); 847 848 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); 849 } 850 851 /** 852 * Get driver and firmware versions. 853 */ 854 void 855 bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) 856 { 857 u32 pgnum, pgoff; 858 u32 loff = 0; 859 int i; 860 u32 *fwsig = (u32 *) fwhdr; 861 862 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 863 pgoff = bfa_ioc_smem_pgoff(ioc, loff); 864 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 865 866 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32)); 867 i++) { 868 fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); 869 loff += sizeof(u32); 870 } 871 } 872 873 /** 874 * Returns TRUE if same. 875 */ 876 bfa_boolean_t 877 bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) 878 { 879 struct bfi_ioc_image_hdr_s *drv_fwhdr; 880 int i; 881 882 drv_fwhdr = 883 (struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0); 884 885 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { 886 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) { 887 bfa_trc(ioc, i); 888 bfa_trc(ioc, fwhdr->md5sum[i]); 889 bfa_trc(ioc, drv_fwhdr->md5sum[i]); 890 return BFA_FALSE; 891 } 892 } 893 894 bfa_trc(ioc, fwhdr->md5sum[0]); 895 return BFA_TRUE; 896 } 897 898 /** 899 * Return true if current running version is valid. Firmware signature and 900 * execution context (driver/bios) must match. 
901 */ 902 static bfa_boolean_t 903 bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc) 904 { 905 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; 906 907 /** 908 * If bios/efi boot (flash based) -- return true 909 */ 910 if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) 911 return BFA_TRUE; 912 913 bfa_ioc_fwver_get(ioc, &fwhdr); 914 drv_fwhdr = 915 (struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0); 916 917 if (fwhdr.signature != drv_fwhdr->signature) { 918 bfa_trc(ioc, fwhdr.signature); 919 bfa_trc(ioc, drv_fwhdr->signature); 920 return BFA_FALSE; 921 } 922 923 if (fwhdr.exec != drv_fwhdr->exec) { 924 bfa_trc(ioc, fwhdr.exec); 925 bfa_trc(ioc, drv_fwhdr->exec); 926 return BFA_FALSE; 927 } 928 929 return bfa_ioc_fwver_cmp(ioc, &fwhdr); 930 } 931 932 /** 933 * Conditionally flush any pending message from firmware at start. 934 */ 935 static void 936 bfa_ioc_msgflush(struct bfa_ioc_s *ioc) 937 { 938 u32 r32; 939 940 r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd); 941 if (r32) 942 bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1); 943 } 944 945 946 static void 947 bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) 948 { 949 enum bfi_ioc_state ioc_fwstate; 950 bfa_boolean_t fwvalid; 951 952 ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); 953 954 if (force) 955 ioc_fwstate = BFI_IOC_UNINIT; 956 957 bfa_trc(ioc, ioc_fwstate); 958 959 /** 960 * check if firmware is valid 961 */ 962 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? 963 BFA_FALSE : bfa_ioc_fwver_valid(ioc); 964 965 if (!fwvalid) { 966 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); 967 return; 968 } 969 970 /** 971 * If hardware initialization is in progress (initialized by other IOC), 972 * just wait for an initialization completion interrupt. 973 */ 974 if (ioc_fwstate == BFI_IOC_INITING) { 975 bfa_trc(ioc, ioc_fwstate); 976 ioc->cbfn->reset_cbfn(ioc->bfa); 977 return; 978 } 979 980 /** 981 * If IOC function is disabled and firmware version is same, 982 * just re-enable IOC. 
983 */ 984 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) { 985 bfa_trc(ioc, ioc_fwstate); 986 987 /** 988 * When using MSI-X any pending firmware ready event should 989 * be flushed. Otherwise MSI-X interrupts are not delivered. 990 */ 991 bfa_ioc_msgflush(ioc); 992 ioc->cbfn->reset_cbfn(ioc->bfa); 993 bfa_fsm_send_event(ioc, IOC_E_FWREADY); 994 return; 995 } 996 997 /** 998 * Initialize the h/w for any other states. 999 */ 1000 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); 1001 } 1002 1003 static void 1004 bfa_ioc_timeout(void *ioc_arg) 1005 { 1006 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; 1007 1008 bfa_trc(ioc, 0); 1009 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT); 1010 } 1011 1012 void 1013 bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len) 1014 { 1015 u32 *msgp = (u32 *) ioc_msg; 1016 u32 i; 1017 1018 bfa_trc(ioc, msgp[0]); 1019 bfa_trc(ioc, len); 1020 1021 bfa_assert(len <= BFI_IOC_MSGLEN_MAX); 1022 1023 /* 1024 * first write msg to mailbox registers 1025 */ 1026 for (i = 0; i < len / sizeof(u32); i++) 1027 bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 1028 bfa_os_wtole(msgp[i])); 1029 1030 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++) 1031 bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0); 1032 1033 /* 1034 * write 1 to mailbox CMD to trigger LPU event 1035 */ 1036 bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1); 1037 (void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); 1038 } 1039 1040 static void 1041 bfa_ioc_send_enable(struct bfa_ioc_s *ioc) 1042 { 1043 struct bfi_ioc_ctrl_req_s enable_req; 1044 1045 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 1046 bfa_ioc_portid(ioc)); 1047 enable_req.ioc_class = ioc->ioc_mc; 1048 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); 1049 } 1050 1051 static void 1052 bfa_ioc_send_disable(struct bfa_ioc_s *ioc) 1053 { 1054 struct bfi_ioc_ctrl_req_s disable_req; 1055 1056 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, 
BFI_IOC_H2I_DISABLE_REQ, 1057 bfa_ioc_portid(ioc)); 1058 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s)); 1059 } 1060 1061 static void 1062 bfa_ioc_send_getattr(struct bfa_ioc_s *ioc) 1063 { 1064 struct bfi_ioc_getattr_req_s attr_req; 1065 1066 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ, 1067 bfa_ioc_portid(ioc)); 1068 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa); 1069 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req)); 1070 } 1071 1072 static void 1073 bfa_ioc_hb_check(void *cbarg) 1074 { 1075 struct bfa_ioc_s *ioc = cbarg; 1076 u32 hb_count; 1077 1078 hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1079 if (ioc->hb_count == hb_count) { 1080 bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, 1081 hb_count); 1082 bfa_ioc_recover(ioc); 1083 return; 1084 } else { 1085 ioc->hb_count = hb_count; 1086 } 1087 1088 bfa_ioc_mbox_poll(ioc); 1089 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, 1090 ioc, BFA_IOC_HB_TOV); 1091 } 1092 1093 static void 1094 bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) 1095 { 1096 ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1097 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, 1098 BFA_IOC_HB_TOV); 1099 } 1100 1101 static void 1102 bfa_ioc_hb_stop(struct bfa_ioc_s *ioc) 1103 { 1104 bfa_timer_stop(&ioc->ioc_timer); 1105 } 1106 1107 /** 1108 * Initiate a full firmware download. 
1109 */ 1110 static void 1111 bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, 1112 u32 boot_param) 1113 { 1114 u32 *fwimg; 1115 u32 pgnum, pgoff; 1116 u32 loff = 0; 1117 u32 chunkno = 0; 1118 u32 i; 1119 1120 /** 1121 * Initialize LMEM first before code download 1122 */ 1123 bfa_ioc_lmem_init(ioc); 1124 1125 /** 1126 * Flash based firmware boot 1127 */ 1128 bfa_trc(ioc, bfa_ioc_fwimg_get_size(ioc)); 1129 if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) 1130 boot_type = BFI_BOOT_TYPE_FLASH; 1131 fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno); 1132 1133 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1134 pgoff = bfa_ioc_smem_pgoff(ioc, loff); 1135 1136 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1137 1138 for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) { 1139 1140 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { 1141 chunkno = BFA_IOC_FLASH_CHUNK_NO(i); 1142 fwimg = bfa_ioc_fwimg_get_chunk(ioc, 1143 BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); 1144 } 1145 1146 /** 1147 * write smem 1148 */ 1149 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 1150 fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]); 1151 1152 loff += sizeof(u32); 1153 1154 /** 1155 * handle page offset wrap around 1156 */ 1157 loff = PSS_SMEM_PGOFF(loff); 1158 if (loff == 0) { 1159 pgnum++; 1160 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1161 } 1162 } 1163 1164 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, 1165 bfa_ioc_smem_pgnum(ioc, 0)); 1166 1167 /* 1168 * Set boot type and boot param at the end. 1169 */ 1170 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF, 1171 bfa_os_swap32(boot_type)); 1172 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF, 1173 bfa_os_swap32(boot_param)); 1174 } 1175 1176 static void 1177 bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force) 1178 { 1179 bfa_ioc_hwinit(ioc, force); 1180 } 1181 1182 /** 1183 * Update BFA configuration from firmware configuration. 
1184 */ 1185 static void 1186 bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc) 1187 { 1188 struct bfi_ioc_attr_s *attr = ioc->attr; 1189 1190 attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop); 1191 attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize); 1192 1193 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); 1194 } 1195 1196 /** 1197 * Attach time initialization of mbox logic. 1198 */ 1199 static void 1200 bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc) 1201 { 1202 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 1203 int mc; 1204 1205 INIT_LIST_HEAD(&mod->cmd_q); 1206 for (mc = 0; mc < BFI_MC_MAX; mc++) { 1207 mod->mbhdlr[mc].cbfn = NULL; 1208 mod->mbhdlr[mc].cbarg = ioc->bfa; 1209 } 1210 } 1211 1212 /** 1213 * Mbox poll timer -- restarts any pending mailbox requests. 1214 */ 1215 static void 1216 bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc) 1217 { 1218 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 1219 struct bfa_mbox_cmd_s *cmd; 1220 u32 stat; 1221 1222 /** 1223 * If no command pending, do nothing 1224 */ 1225 if (list_empty(&mod->cmd_q)) 1226 return; 1227 1228 /** 1229 * If previous command is not yet fetched by firmware, do nothing 1230 */ 1231 stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); 1232 if (stat) 1233 return; 1234 1235 /** 1236 * Enqueue command to firmware. 1237 */ 1238 bfa_q_deq(&mod->cmd_q, &cmd); 1239 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 1240 } 1241 1242 /** 1243 * Cleanup any pending requests. 1244 */ 1245 static void 1246 bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc) 1247 { 1248 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 1249 struct bfa_mbox_cmd_s *cmd; 1250 1251 while (!list_empty(&mod->cmd_q)) 1252 bfa_q_deq(&mod->cmd_q, &cmd); 1253 } 1254 1255 /** 1256 * bfa_ioc_public 1257 */ 1258 1259 /** 1260 * Interface used by diag module to do firmware boot with memory test 1261 * as the entry vector. 
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
{
        bfa_os_addr_t rb;

        bfa_ioc_stats(ioc, ioc_boots);

        if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
                return;

        /**
         * Initialize IOC state of all functions on a chip reset.
         * NOTE(review): BFI_BOOT_TYPE_MEMTEST is compared against
         * boot_param rather than boot_type -- confirm this is intended
         * against the callers and bfi_ioc.h.
         */
        rb = ioc->pcidev.pci_bar_kva;
        if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
                bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
                bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
        } else {
                bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
                bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
        }

        bfa_ioc_download_fw(ioc, boot_type, boot_param);

        /**
         * Enable interrupts just before starting LPU
         */
        ioc->cbfn->reset_cbfn(ioc->bfa);
        bfa_ioc_lpu_start(ioc);
}

/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
        bfa_auto_recover = auto_recover;
}

/**
 * Return BFA_TRUE if the IOC state machine is in the operational state.
 */
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

/**
 * Copy one inbound (LPU-to-host) mailbox message into @mbmsg and
 * acknowledge the mailbox interrupt.
 *
 * @param[out] mbmsg  buffer of at least sizeof(union bfi_ioc_i2h_msg_u)
 */
void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
        u32 *msgp = mbmsg;
        u32 r32;
        int i;

        /**
         * read the MBOX msg, one 32-bit word at a time
         */
        for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
             i++) {
                r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
                                   i * sizeof(u32));
                /*
                 * NOTE(review): htonl applied to data read *from* the
                 * device; hton/ntoh are the same operation on every
                 * platform this runs on, but ntohl would express the
                 * intent -- confirm.
                 */
                msgp[i] = bfa_os_htonl(r32);
        }

        /**
         * turn off mailbox interrupt by clearing mailbox status;
         * the read-back presumably flushes the posted write -- confirm.
         */
        bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
        bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
}

/**
 * Handle a BFI_MC_IOC class mailbox message: feed firmware events into
 * the IOC state machine.
 */
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
        union bfi_ioc_i2h_msg_u *msg;

        msg = (union bfi_ioc_i2h_msg_u *)m;

        bfa_ioc_stats(ioc, ioc_isrs);

        switch (msg->mh.msg_id) {
        case BFI_IOC_I2H_HBEAT:
                /* heartbeat only proves liveness; nothing to do */
                break;

        case BFI_IOC_I2H_READY_EVENT:
                bfa_fsm_send_event(ioc, IOC_E_FWREADY);
                break;

        case BFI_IOC_I2H_ENABLE_REPLY:
                bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
                break;

        case BFI_IOC_I2H_DISABLE_REPLY:
                bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
                break;

        case BFI_IOC_I2H_GETATTR_REPLY:
                bfa_ioc_getattr_reply(ioc);
                break;

        default:
                /* unknown message id from firmware */
                bfa_trc(ioc, msg->mh.msg_id);
                bfa_assert(0);
        }
}

/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 * @param[in]	cbfn	driver callback functions
 * @param[in]	timer_mod	timer module
 * @param[in]	trcmod	kernel trace module
 * @param[in]	aen	kernel aen event module
 * @param[in]	logm	kernel logging module
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
               struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod,
               struct bfa_aen_s *aen, struct bfa_log_mod_s *logm)
{
        ioc->bfa = bfa;
        ioc->cbfn = cbfn;
        ioc->timer_mod = timer_mod;
        ioc->trcmod = trcmod;
        ioc->aen = aen;
        ioc->logm = logm;
        ioc->fcmode = BFA_FALSE;
        ioc->pllinit = BFA_FALSE;
        /* arm one-shot firmware trace save for the first failure */
        ioc->dbg_fwsave_once = BFA_TRUE;

        bfa_ioc_mbox_attach(ioc);
        INIT_LIST_HEAD(&ioc->hb_notify_q);

        bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
}

/**
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
        bfa_fsm_send_event(ioc, IOC_E_DETACH);
}

/**
 * Setup IOC PCI properties.
1410 * 1411 * @param[in] pcidev PCI device information for this IOC 1412 */ 1413 void 1414 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, 1415 enum bfi_mclass mc) 1416 { 1417 ioc->ioc_mc = mc; 1418 ioc->pcidev = *pcidev; 1419 ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT); 1420 ioc->cna = ioc->ctdev && !ioc->fcmode; 1421 1422 /** 1423 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c 1424 */ 1425 if (ioc->ctdev) 1426 bfa_ioc_set_ct_hwif(ioc); 1427 else 1428 bfa_ioc_set_cb_hwif(ioc); 1429 1430 bfa_ioc_map_port(ioc); 1431 bfa_ioc_reg_init(ioc); 1432 } 1433 1434 /** 1435 * Initialize IOC dma memory 1436 * 1437 * @param[in] dm_kva kernel virtual address of IOC dma memory 1438 * @param[in] dm_pa physical address of IOC dma memory 1439 */ 1440 void 1441 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) 1442 { 1443 /** 1444 * dma memory for firmware attribute 1445 */ 1446 ioc->attr_dma.kva = dm_kva; 1447 ioc->attr_dma.pa = dm_pa; 1448 ioc->attr = (struct bfi_ioc_attr_s *)dm_kva; 1449 } 1450 1451 /** 1452 * Return size of dma memory required. 1453 */ 1454 u32 1455 bfa_ioc_meminfo(void) 1456 { 1457 return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ); 1458 } 1459 1460 void 1461 bfa_ioc_enable(struct bfa_ioc_s *ioc) 1462 { 1463 bfa_ioc_stats(ioc, ioc_enables); 1464 ioc->dbg_fwsave_once = BFA_TRUE; 1465 1466 bfa_fsm_send_event(ioc, IOC_E_ENABLE); 1467 } 1468 1469 void 1470 bfa_ioc_disable(struct bfa_ioc_s *ioc) 1471 { 1472 bfa_ioc_stats(ioc, ioc_disables); 1473 bfa_fsm_send_event(ioc, IOC_E_DISABLE); 1474 } 1475 1476 /** 1477 * Returns memory required for saving firmware trace in case of crash. 1478 * Driver must call this interface to allocate memory required for 1479 * automatic saving of firmware trace. Driver should call 1480 * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this 1481 * trace memory. 
1482 */ 1483 int 1484 bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover) 1485 { 1486 return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; 1487 } 1488 1489 /** 1490 * Initialize memory for saving firmware trace. Driver must initialize 1491 * trace memory before call bfa_ioc_enable(). 1492 */ 1493 void 1494 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) 1495 { 1496 ioc->dbg_fwsave = dbg_fwsave; 1497 ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover); 1498 } 1499 1500 u32 1501 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr) 1502 { 1503 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr); 1504 } 1505 1506 u32 1507 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr) 1508 { 1509 return PSS_SMEM_PGOFF(fmaddr); 1510 } 1511 1512 /** 1513 * Register mailbox message handler functions 1514 * 1515 * @param[in] ioc IOC instance 1516 * @param[in] mcfuncs message class handler functions 1517 */ 1518 void 1519 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs) 1520 { 1521 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 1522 int mc; 1523 1524 for (mc = 0; mc < BFI_MC_MAX; mc++) 1525 mod->mbhdlr[mc].cbfn = mcfuncs[mc]; 1526 } 1527 1528 /** 1529 * Register mailbox message handler function, to be called by common modules 1530 */ 1531 void 1532 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, 1533 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) 1534 { 1535 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 1536 1537 mod->mbhdlr[mc].cbfn = cbfn; 1538 mod->mbhdlr[mc].cbarg = cbarg; 1539 } 1540 1541 /** 1542 * Queue a mailbox command request to firmware. Waits if mailbox is busy. 
1543 * Responsibility of caller to serialize 1544 * 1545 * @param[in] ioc IOC instance 1546 * @param[i] cmd Mailbox command 1547 */ 1548 void 1549 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd) 1550 { 1551 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 1552 u32 stat; 1553 1554 /** 1555 * If a previous command is pending, queue new command 1556 */ 1557 if (!list_empty(&mod->cmd_q)) { 1558 list_add_tail(&cmd->qe, &mod->cmd_q); 1559 return; 1560 } 1561 1562 /** 1563 * If mailbox is busy, queue command for poll timer 1564 */ 1565 stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); 1566 if (stat) { 1567 list_add_tail(&cmd->qe, &mod->cmd_q); 1568 return; 1569 } 1570 1571 /** 1572 * mailbox is free -- queue command to firmware 1573 */ 1574 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 1575 } 1576 1577 /** 1578 * Handle mailbox interrupts 1579 */ 1580 void 1581 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc) 1582 { 1583 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 1584 struct bfi_mbmsg_s m; 1585 int mc; 1586 1587 bfa_ioc_msgget(ioc, &m); 1588 1589 /** 1590 * Treat IOC message class as special. 1591 */ 1592 mc = m.mh.msg_class; 1593 if (mc == BFI_MC_IOC) { 1594 bfa_ioc_isr(ioc, &m); 1595 return; 1596 } 1597 1598 if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) 1599 return; 1600 1601 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); 1602 } 1603 1604 void 1605 bfa_ioc_error_isr(struct bfa_ioc_s *ioc) 1606 { 1607 bfa_fsm_send_event(ioc, IOC_E_HWERROR); 1608 } 1609 1610 #ifndef BFA_BIOS_BUILD 1611 1612 /** 1613 * return true if IOC is disabled 1614 */ 1615 bfa_boolean_t 1616 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) 1617 { 1618 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) 1619 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); 1620 } 1621 1622 /** 1623 * return true if IOC firmware is different. 
1624 */ 1625 bfa_boolean_t 1626 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) 1627 { 1628 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) 1629 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) 1630 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch); 1631 } 1632 1633 #define bfa_ioc_state_disabled(__sm) \ 1634 (((__sm) == BFI_IOC_UNINIT) || \ 1635 ((__sm) == BFI_IOC_INITING) || \ 1636 ((__sm) == BFI_IOC_HWINIT) || \ 1637 ((__sm) == BFI_IOC_DISABLED) || \ 1638 ((__sm) == BFI_IOC_FAIL) || \ 1639 ((__sm) == BFI_IOC_CFG_DISABLED)) 1640 1641 /** 1642 * Check if adapter is disabled -- both IOCs should be in a disabled 1643 * state. 1644 */ 1645 bfa_boolean_t 1646 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) 1647 { 1648 u32 ioc_state; 1649 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 1650 1651 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) 1652 return BFA_FALSE; 1653 1654 ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG); 1655 if (!bfa_ioc_state_disabled(ioc_state)) 1656 return BFA_FALSE; 1657 1658 ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG); 1659 if (!bfa_ioc_state_disabled(ioc_state)) 1660 return BFA_FALSE; 1661 1662 return BFA_TRUE; 1663 } 1664 1665 /** 1666 * Add to IOC heartbeat failure notification queue. 
To be used by common 1667 * modules such as 1668 */ 1669 void 1670 bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, 1671 struct bfa_ioc_hbfail_notify_s *notify) 1672 { 1673 list_add_tail(¬ify->qe, &ioc->hb_notify_q); 1674 } 1675 1676 #define BFA_MFG_NAME "Brocade" 1677 void 1678 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, 1679 struct bfa_adapter_attr_s *ad_attr) 1680 { 1681 struct bfi_ioc_attr_s *ioc_attr; 1682 1683 ioc_attr = ioc->attr; 1684 1685 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); 1686 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); 1687 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); 1688 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); 1689 bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd, 1690 sizeof(struct bfa_mfg_vpd_s)); 1691 1692 ad_attr->nports = bfa_ioc_get_nports(ioc); 1693 ad_attr->max_speed = bfa_ioc_speed_sup(ioc); 1694 1695 bfa_ioc_get_adapter_model(ioc, ad_attr->model); 1696 /* For now, model descr uses same model string */ 1697 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); 1698 1699 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) 1700 ad_attr->prototype = 1; 1701 else 1702 ad_attr->prototype = 0; 1703 1704 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); 1705 ad_attr->mac = bfa_ioc_get_mac(ioc); 1706 1707 ad_attr->pcie_gen = ioc_attr->pcie_gen; 1708 ad_attr->pcie_lanes = ioc_attr->pcie_lanes; 1709 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; 1710 ad_attr->asic_rev = ioc_attr->asic_rev; 1711 1712 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); 1713 1714 ad_attr->cna_capable = ioc->cna; 1715 } 1716 1717 enum bfa_ioc_type_e 1718 bfa_ioc_get_type(struct bfa_ioc_s *ioc) 1719 { 1720 if (!ioc->ctdev || ioc->fcmode) 1721 return BFA_IOC_TYPE_FC; 1722 else if (ioc->ioc_mc == BFI_MC_IOCFC) 1723 return BFA_IOC_TYPE_FCoE; 1724 else if (ioc->ioc_mc == BFI_MC_LL) 1725 return BFA_IOC_TYPE_LL; 1726 else { 1727 bfa_assert(ioc->ioc_mc == BFI_MC_LL); 1728 return BFA_IOC_TYPE_LL; 1729 } 1730 } 1731 1732 void 1733 
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num) 1734 { 1735 bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN); 1736 bfa_os_memcpy((void *)serial_num, 1737 (void *)ioc->attr->brcd_serialnum, 1738 BFA_ADAPTER_SERIAL_NUM_LEN); 1739 } 1740 1741 void 1742 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver) 1743 { 1744 bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN); 1745 bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); 1746 } 1747 1748 void 1749 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev) 1750 { 1751 bfa_assert(chip_rev); 1752 1753 bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN); 1754 1755 chip_rev[0] = 'R'; 1756 chip_rev[1] = 'e'; 1757 chip_rev[2] = 'v'; 1758 chip_rev[3] = '-'; 1759 chip_rev[4] = ioc->attr->asic_rev; 1760 chip_rev[5] = '\0'; 1761 } 1762 1763 void 1764 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver) 1765 { 1766 bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN); 1767 bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version, 1768 BFA_VERSION_LEN); 1769 } 1770 1771 void 1772 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer) 1773 { 1774 bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); 1775 bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); 1776 } 1777 1778 void 1779 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) 1780 { 1781 struct bfi_ioc_attr_s *ioc_attr; 1782 u8 nports; 1783 u8 max_speed; 1784 1785 bfa_assert(model); 1786 bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); 1787 1788 ioc_attr = ioc->attr; 1789 1790 nports = bfa_ioc_get_nports(ioc); 1791 max_speed = bfa_ioc_speed_sup(ioc); 1792 1793 /** 1794 * model name 1795 */ 1796 if (max_speed == 10) { 1797 strcpy(model, "BR-10?0"); 1798 model[5] = '0' + nports; 1799 } else { 1800 strcpy(model, "Brocade-??5"); 1801 model[8] = '0' + max_speed; 1802 model[9] = '0' + nports; 1803 } 1804 } 1805 1806 enum 
bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
        return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
}

/**
 * Fill @ioc_attr with IOC state, port id, type, adapter attributes and
 * PCI attributes.
 */
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
        bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

        ioc_attr->state = bfa_ioc_get_state(ioc);
        ioc_attr->port_id = ioc->port_id;

        ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

        bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

        ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
        ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
        bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

/**
 * hal_wwn_public
 */

/**
 * Port WWN: the manufacturing WWN, with the last byte incremented for
 * the second port (port id 1).
 */
wwn_t
bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
{
        union {
                wwn_t wwn;
                u8 byte[sizeof(wwn_t)];
        } w;

        w.wwn = ioc->attr->mfg_wwn;

        if (bfa_ioc_portid(ioc) == 1)
                w.byte[7]++;

        return w.wwn;
}

/**
 * Node WWN: same derivation as the port WWN, with the first byte forced
 * to 0x20 (presumably the NAA node-name prefix -- confirm against the
 * WWN format spec).
 */
wwn_t
bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
{
        union {
                wwn_t wwn;
                u8 byte[sizeof(wwn_t)];
        } w;

        w.wwn = ioc->attr->mfg_wwn;

        if (bfa_ioc_portid(ioc) == 1)
                w.byte[7]++;

        w.byte[0] = 0x20;

        return w.wwn;
}

/**
 * Build an NAA-5 format WWN: 0x5 in the top nibble, the manufacturing
 * WWN shifted left by one nibble, and the low 12 bits of @inst appended.
 */
wwn_t
bfa_ioc_get_wwn_naa5(struct bfa_ioc_s *ioc, u16 inst)
{
        union {
                wwn_t wwn;
                u8 byte[sizeof(wwn_t)];
        } w, w5;

        bfa_trc(ioc, inst);

        w.wwn = ioc->attr->mfg_wwn;
        w5.byte[0] = 0x50 | w.byte[2] >> 4;
        w5.byte[1] = w.byte[2] << 4 | w.byte[3] >> 4;
        w5.byte[2] = w.byte[3] << 4 | w.byte[4] >> 4;
        w5.byte[3] = w.byte[4] << 4 | w.byte[5] >> 4;
        w5.byte[4] = w.byte[5] << 4 | w.byte[6] >> 4;
        w5.byte[5] = w.byte[6] << 4 | w.byte[7] >> 4;
        w5.byte[6] = w.byte[7] << 4 | (inst & 0x0f00) >> 8;
        w5.byte[7] = (inst & 0xff);

        return w5.wwn;
}

/**
 * Adapter id: the raw manufacturing WWN as a 64-bit value.
 */
u64
bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
{
        return ioc->attr->mfg_wwn;
}

/**
 * MAC address: the manufacturing MAC with the PCI function number added
 * to the last byte.
 */
mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
        mac_t mac;

        mac = ioc->attr->mfg_mac;
        mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);

        return mac;
}

/**
 * Force FC mode; the port id then follows the PCI function number.
 */
void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
        ioc->fcmode = BFA_TRUE;
        ioc->port_id = bfa_ioc_pcifn(ioc);
}

/**
 * True when operating in FC mode, or on a non-CT (FC-only) device.
 */
bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
        return ioc->fcmode || (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT);
}

/**
 * Send AEN notification
 */
static void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
        union bfa_aen_data_u aen_data;
        struct bfa_log_mod_s *logmod = ioc->logm;
        s32 inst_num = 0;
        enum bfa_ioc_type_e ioc_type;

        bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num);

        /*
         * NOTE(review): aen_data is assembled below but never handed to
         * the AEN module within this function -- confirm whether a post
         * call is missing or the data is intentionally unused.
         */
        memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
        memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
        ioc_type = bfa_ioc_get_type(ioc);
        switch (ioc_type) {
        case BFA_IOC_TYPE_FC:
                aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
                break;
        case BFA_IOC_TYPE_FCoE:
                aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
                aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
                break;
        case BFA_IOC_TYPE_LL:
                aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
                break;
        default:
                bfa_assert(ioc_type == BFA_IOC_TYPE_FC);
                break;
        }
        aen_data.ioc.ioc_type = ioc_type;
}

/**
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
        int tlen;

        /* no trace buffer was claimed (auto-recover disabled) */
        if (ioc->dbg_fwsave_len == 0)
                return BFA_STATUS_ENOFSAVE;

        /* copy at most the saved length; report actual bytes copied */
        tlen = *trclen;
        if (tlen > ioc->dbg_fwsave_len)
                tlen = ioc->dbg_fwsave_len;

        bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
        *trclen = tlen;
        return BFA_STATUS_OK;
}

/**
 * Clear saved firmware trace -- re-arms the one-shot auto save so the
 * next failure is captured again.
 */
void
bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
{
        ioc->dbg_fwsave_once = BFA_TRUE;
}

/**
 * Read the current firmware trace out of IOC shared memory (smem).
 * On success *trclen is updated to the number of bytes copied.
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
        u32 pgnum;
        u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
        int i, tlen;
        u32 *tbuf = trcdata, r32;

        bfa_trc(ioc, *trclen);

        pgnum = bfa_ioc_smem_pgnum(ioc, loff);
        loff = bfa_ioc_smem_pgoff(ioc, loff);

        /*
         * Hold semaphore to serialize pll init and fwtrc.
         */
        if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
                return BFA_STATUS_FAILED;

        bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

        /* clamp to the trace buffer size, then count in 32-bit words */
        tlen = *trclen;
        if (tlen > BFA_DBG_FWTRC_LEN)
                tlen = BFA_DBG_FWTRC_LEN;
        tlen /= sizeof(u32);

        bfa_trc(ioc, tlen);

        for (i = 0; i < tlen; i++) {
                r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
                tbuf[i] = bfa_os_ntohl(r32);
                loff += sizeof(u32);

                /**
                 * handle page offset wrap around
                 */
                loff = PSS_SMEM_PGOFF(loff);
                if (loff == 0) {
                        pgnum++;
                        bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
                }
        }
        /* restore the page register to page 0 */
        bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
                      bfa_ioc_smem_pgnum(ioc, 0));

        /*
         * release semaphore.
         */
        bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

        bfa_trc(ioc, pgnum);

        *trclen = tlen * sizeof(u32);
        return BFA_STATUS_OK;
}

/**
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
{
        int tlen;

        if (ioc->dbg_fwsave_len) {
                tlen = ioc->dbg_fwsave_len;
                bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
        }
}

/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
        /* save the firmware trace only on the first failure */
        if (ioc->dbg_fwsave_once) {
                ioc->dbg_fwsave_once = BFA_FALSE;
                bfa_ioc_debug_save(ioc);
        }

        bfa_ioc_stats(ioc, ioc_hbfails);
        bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

#else

/*
 * BIOS build: AEN posting is a no-op and recovery is unsupported.
 */
static void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
}

static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
        bfa_assert(0);
}

#endif