/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

/* IOC local definitions */

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))

/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */

#define bfa_ioc_firmware_lock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
		((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
		((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)	\
		((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
		((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)	\
		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
		((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)			\
	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) ||	\
	 readl((__ioc)->ioc_regs.hfn_mbox_cmd))

static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
			enum bfi_fwboot_type boot_type, u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
						char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
						char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
						char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
						char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
						char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);

/* IOC state machine definitions/declarations */
enum ioc_event {
	IOC_E_RESET		= 1,	/*!< IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*!< IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*!< IOC disable request	*/
	IOC_E_DETACH		= 4,	/*!< driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*!< failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*!< heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*!< hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*!< timeout			*/
	IOC_E_HWFAILED		= 12,	/*!< PCI mapping failure notice	*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);

static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);

/* IOCPF state machine events */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*!< IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*!< IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*!< stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*!< f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*!< enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*!< disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*!< failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*!< h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*!< f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*!< h/w sem mapping error	*/
};

/* IOCPF states */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*!< IOC is in reset state	*/
	BFA_IOCPF_SEMWAIT	= 2,	/*!< Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*!< IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*!< IOCPF is initialized	*/
	BFA_IOCPF_INITFAIL	= 5,	/*!< IOCPF failed		*/
	BFA_IOCPF_FAIL		= 6,	/*!< IOCPF failed		*/
	BFA_IOCPF_DISABLING	= 7,	/*!< IOCPF is being disabled	*/
	BFA_IOCPF_DISABLED	= 8,	/*!< IOCPF is disabled		*/
	BFA_IOCPF_FWMISMATCH	= 9,	/*!< IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);

static struct bfa_sm_table iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/* IOC State Machine */
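
/*
 * Note on the state-machine plumbing (a sketch of the helper macros, not
 * part of the IOC logic itself): each bfa_fsm_state_decl() above expands
 * into prototypes for a <state>_entry() action and a <state>() event
 * handler. Assuming the usual bfa_fsm helpers, a transition and an event
 * dispatch look roughly like:
 *
 *	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);   (runs the _entry action)
 *	bfa_fsm_send_event(ioc, IOC_E_ENABLE);      (calls current handler)
 *
 * The bfa_sm_table arrays map handler pointers back to the externally
 * visible BFA_IOC_xxx and BFA_IOCPF_xxx state codes.
 */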

/* Beginning state. IOC uninit state. */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}

/* IOC is in uninit state. */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Reset entry actions -- initialize state machine */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/* IOC is in reset state. */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_enable(ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Semaphore should be acquired for version check. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	mod_timer(&ioc->ioc_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_getattr(ioc);
}

/* IOC configuration in progress. Timer is active. */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		del_timer(&ioc->ioc_timer);
		/* fall through */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_getattrfail(ioc);
		break;

	case IOC_E_DISABLE:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
}

static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_iocpf_fail(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_disable(ioc);
}

/* IOC is being disabled */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_iocpf_fail(ioc);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOC disable completion entry. */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}

/* Hardware initialization retry. */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/* Initialization retry failed. */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}

/* IOC failure. */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_HWERROR:
		/* HB failure notification, ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}

/* IOC hardware failure. */
static void
bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOCPF State Machine */

/* Reset entry actions -- initialize state machine */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
	iocpf->fw_mismatch_notified = false;
	iocpf->auto_recover = bfa_nw_auto_recover;
}

/* Beginning state. IOC is in reset state. */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(event);
	}
}
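
/*
 * Summary of the firmware-check path implemented by the two states below:
 * first acquire the h/w semaphore, then attempt the firmware version lock.
 * On a version match, join the sync group and proceed to h/w init; on a
 * mismatch, release the semaphore and park in the mismatch state, retrying
 * the whole check on every BFA_IOC_TOV timeout.
 */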

/* Semaphore should be acquired for version check. */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_init(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Awaiting h/w semaphore to continue with version check. */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				bfa_nw_ioc_hw_sem_release(ioc);
				mod_timer(&ioc->sem_timer, jiffies +
					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
			}
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Notify enable completion callback */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
	/* Call only the first time sm enters fwmismatch state. */
	if (!iocpf->fw_mismatch_notified)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = true;
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
}

/* Awaiting firmware version match. */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Request for semaphore. */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Awaiting semaphore for h/w initialization. */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			mod_timer(&ioc->sem_timer, jiffies +
				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_reset(iocpf->ioc, false);
}

/* Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_pf_enabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_disable(iocpf->ioc);
}

/* IOC is being disabled */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* IOC hb ack request is being removed. */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOC disable completion entry. */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_pf_disabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				bfa_nw_ioc_hw_sem_release(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}

/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* BFA IOC private functions */

/* Notify common modules registered for notification. */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
	struct bfa_ioc_notify *notify;
	struct list_head *qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return true;

	return false;
}

void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	readl(sem_reg);
	writel(1, sem_reg);
}
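
/*
 * Usage sketch for the raw semaphore helpers above (illustrative only):
 * a read returns 0 when the lock is granted to the reader and 1 while it
 * is held elsewhere; writing 1 releases it.
 *
 *	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
 *		... critical section ...
 *		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
 *	}
 */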

/* Clear fwver hdr */
static void
bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
{
	u32 pgnum, pgoff, loff = 0;
	int i;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
		writel(0, ioc->ioc_regs.smem_page_start + loff);
		loff += sizeof(u32);
	}
}

static void
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr;
	u32 fwstate, r32;

	/* Spin on init semaphore to serialize. */
	r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	}

	fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_ioc_fwver_clear(ioc);
	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);

	/* Unlock init semaphore */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32 r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register.
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}

void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}

/* Initialize LPU local memory (aka secondary memory / SRAM) */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
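
/*
 * SMEM access pattern used by the readers and writers below: shared
 * memory is reached through a single page-sized window; the page is
 * selected by writing host_page_num_fn, words are then accessed relative
 * to smem_page_start, and PSS_SMEM_PGOFF() wraps the offset at each page
 * boundary so the page number can be advanced.
 */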

/* Get driver and firmware versions. */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32 pgnum;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl(loff + ioc->ioc_regs.smem_page_start));
		loff += sizeof(u32);
	}
}

static bool
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
			struct bfi_ioc_image_hdr *fwhdr_2)
{
	int i;

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
			return false;
	}

	return true;
}

/* Returns TRUE if major, minor and maintenance versions are the same.
 * If the patch versions are also the same, the MD5 checksums must match.
 */
static bool
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
			struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
		return false;
	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
		return false;
	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
		return false;
	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
		return false;
	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
	    drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
	    drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);

	return true;
}

static bool
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
{
	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
		return false;

	return true;
}

static bool
fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
{
	if (fwhdr->fwver.phase == 0 &&
	    fwhdr->fwver.build == 0)
		return false;

	return true;
}
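
/*
 * Version precedence implemented by bfa_ioc_fw_ver_patch_cmp() below:
 * signature, major, minor and maint must match for the images to be
 * comparable at all; a higher patch number wins; at equal patch a GA
 * image (per fwhdr_is_ga()) outranks an internal build; otherwise phase
 * and then build break the tie, and fully equal versions compare as
 * BFI_IOC_IMG_VER_SAME (MD5 already matched in the compatibility check).
 */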

/* Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better. */
static enum bfi_ioc_img_ver_cmp
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
			struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
	if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp))
		return BFI_IOC_IMG_VER_INCOMP;

	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_OLD;

	/* GA takes priority over internal builds of the same patch stream.
	 * At this point major, minor, maint and patch numbers are the same.
	 */
	if (fwhdr_is_ga(base_fwhdr)) {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_SAME;
		else
			return BFI_IOC_IMG_VER_OLD;
	} else {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_BETTER;
	}

	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_OLD;

	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_OLD;

	/* All version numbers are equal.
	 * The MD5 check was already done as part of the compatibility check.
	 */
	return BFI_IOC_IMG_VER_SAME;
}

/* register definitions */
#define FLI_CMD_REG			0x0001d000
#define FLI_WRDATA_REG			0x0001d00c
#define FLI_RDDATA_REG			0x0001d010
#define FLI_ADDR_REG			0x0001d004
#define FLI_DEV_STATUS_REG		0x0001d014

#define BFA_FLASH_FIFO_SIZE		128	/* fifo size */
#define BFA_FLASH_CHECK_MAX		10000	/* max # of status check */
#define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op check */
#define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */

#define NFC_STATE_RUNNING		0x20000001
#define NFC_STATE_PAUSED		0x00004560
#define NFC_VER_VALID			0x147

enum bfa_flash_cmd {
	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
	BFA_FLASH_WRITE_ENABLE	= 0x06,	/* write enable */
	BFA_FLASH_SECTOR_ERASE	= 0xd8,	/* sector erase */
	BFA_FLASH_WRITE		= 0x02,	/* write */
	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
};

/* hardware error definition */
enum bfa_flash_err {
	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */
	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */
	BFA_FLASH_BAD		= -3,	/*!< flash bad */
	BFA_FLASH_BUSY		= -4,	/*!< flash busy */
	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */
	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */
	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */
	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */
	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */
};

/* flash command register data structure */
union bfa_flash_cmd_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	act:1;
		u32	rsv:1;
		u32	write_cnt:9;
		u32	read_cnt:9;
		u32	addr_cnt:4;
		u32	cmd:8;
#else
		u32	cmd:8;
		u32	addr_cnt:4;
		u32	read_cnt:9;
		u32	write_cnt:9;
		u32	rsv:1;
		u32	act:1;
#endif
	} r;
	u32	i;
};

/* flash device status register data structure */
union bfa_flash_dev_status_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	rsv:21;
		u32	fifo_cnt:6;
		u32	busy:1;
		u32	init_status:1;
		u32	present:1;
		u32	bad:1;
		u32	good:1;
#else
		u32	good:1;
		u32	bad:1;
		u32	present:1;
		u32	init_status:1;
		u32	busy:1;
		u32	fifo_cnt:6;
		u32	rsv:21;
#endif
	} r;
	u32	i;
};

/* flash address register data structure */
union bfa_flash_addr_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	addr:24;
		u32	dummy:8;
#else
		u32	dummy:8;
		u32	addr:24;
#endif
	} r;
	u32	i;
};

/* Flash raw private functions */
static void
bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
		  u8 rd_cnt, u8 ad_cnt, u8 op)
{
	union bfa_flash_cmd_reg cmd;

	cmd.i = 0;
	cmd.r.act = 1;
	cmd.r.write_cnt = wr_cnt;
	cmd.r.read_cnt = rd_cnt;
	cmd.r.addr_cnt = ad_cnt;
	cmd.r.cmd = op;
	writel(cmd.i, (pci_bar + FLI_CMD_REG));
}

static void
bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
{
	union bfa_flash_addr_reg addr;

	addr.r.addr = address & 0x00ffffff;
	addr.r.dummy = 0;
	writel(addr.i, (pci_bar + FLI_ADDR_REG));
}
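
/*
 * FLI command handshake used by the helpers below: a flash operation is
 * started by programming the address register, then writing the command
 * register with the opcode, the address/read/write byte counts and the
 * act bit set. The controller clears act when the operation completes,
 * which bfa_flash_cmd_act_check() polls for.
 */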

static int
bfa_flash_cmd_act_check(void __iomem *pci_bar)
{
	union bfa_flash_cmd_reg cmd;

	cmd.i = readl(pci_bar + FLI_CMD_REG);

	if (cmd.r.act)
		return BFA_FLASH_ERR_CMD_ACT;

	return 0;
}

/* Flush FLI data fifo. */
static int
bfa_flash_fifo_flush(void __iomem *pci_bar)
{
	u32 i;
	u32 t;
	union bfa_flash_dev_status_reg dev_status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);

	if (!dev_status.r.fifo_cnt)
		return 0;

	/* fifo counter in terms of words */
	for (i = 0; i < dev_status.r.fifo_cnt; i++)
		t = readl(pci_bar + FLI_RDDATA_REG);

	/* Check the device status. It may take some time. */
	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
		if (!dev_status.r.fifo_cnt)
			break;
	}

	if (dev_status.r.fifo_cnt)
		return BFA_FLASH_ERR_FIFO_CNT;

	return 0;
}

/* Read flash status. */
static int
bfa_flash_status_read(void __iomem *pci_bar)
{
	union bfa_flash_dev_status_reg dev_status;
	int status;
	u32 ret_status;
	int i;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);

	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		status = bfa_flash_cmd_act_check(pci_bar);
		if (!status)
			break;
	}

	if (status)
		return status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
	if (!dev_status.r.fifo_cnt)
		return BFA_FLASH_BUSY;

	ret_status = readl(pci_bar + FLI_RDDATA_REG);
	ret_status >>= 24;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	return ret_status;
}

/* Start flash read operation. */
static int
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
		     char *buf)
{
	int status;

	/* len must be a multiple of 4 and must not exceed the fifo size */
	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
		return BFA_FLASH_ERR_LEN;

	/* check status */
	status = bfa_flash_status_read(pci_bar);
	if (status == BFA_FLASH_BUSY)
		status = bfa_flash_status_read(pci_bar);

	if (status < 0)
		return status;

	/* check if write-in-progress bit is cleared */
	if (status & BFA_FLASH_WIP_MASK)
		return BFA_FLASH_ERR_WIP;

	bfa_flash_set_addr(pci_bar, offset);

	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);

	return 0;
}

/* Check flash read operation. */
static u32
bfa_flash_read_check(void __iomem *pci_bar)
{
	if (bfa_flash_cmd_act_check(pci_bar))
		return 1;

	return 0;
}

/* End flash read operation. */
static void
bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
{
	u32 i;

	/* read data fifo up to 32 words */
	for (i = 0; i < len; i += 4) {
		u32 w = readl(pci_bar + FLI_RDDATA_REG);
		*((u32 *)(buf + i)) = swab32(w);
	}

	bfa_flash_fifo_flush(pci_bar);
}
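
/*
 * The raw read below transfers data through the 128-byte FLI FIFO, so a
 * request is split into chunks that never cross a FIFO-sized boundary:
 * for a source offset s, the first chunk is l = (s / fifo_sz + 1) *
 * fifo_sz - s bytes (e.g. 28 bytes when starting at offset 100), after
 * which the transfer is FIFO-aligned and proceeds in full 128-byte chunks.
 */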

/* Perform flash raw read. */

#define FLASH_BLOCKING_OP_MAX	500
#define FLASH_SEM_LOCK_REG	0x18820

static int
bfa_raw_sem_get(void __iomem *bar)
{
	int locked;

	locked = readl(bar + FLASH_SEM_LOCK_REG);

	return !locked;
}

static enum bfa_status
bfa_flash_sem_get(void __iomem *bar)
{
	u32 n = FLASH_BLOCKING_OP_MAX;

	while (!bfa_raw_sem_get(bar)) {
		if (--n <= 0)
			return BFA_STATUS_BADFLASH;
		mdelay(10);
	}
	return BFA_STATUS_OK;
}

static void
bfa_flash_sem_put(void __iomem *bar)
{
	writel(0, (bar + FLASH_SEM_LOCK_REG));
}

static enum bfa_status
bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
		   u32 len)
{
	u32 n, off, l, s, residue, fifo_sz;
	int status;

	residue = len;
	off = 0;
	fifo_sz = BFA_FLASH_FIFO_SIZE;
	status = bfa_flash_sem_get(pci_bar);
	if (status != BFA_STATUS_OK)
		return status;

	while (residue) {
		s = offset + off;
		n = s / fifo_sz;
		l = (n + 1) * fifo_sz - s;
		if (l > residue)
			l = residue;

		status = bfa_flash_read_start(pci_bar, offset + off, l,
					      &buf[off]);
		if (status < 0) {
			bfa_flash_sem_put(pci_bar);
			return BFA_STATUS_FAILED;
		}

		n = BFA_FLASH_BLOCKING_OP_MAX;
		while (bfa_flash_read_check(pci_bar)) {
			if (--n <= 0) {
				bfa_flash_sem_put(pci_bar);
				return BFA_STATUS_FAILED;
			}
		}

		bfa_flash_read_end(pci_bar, l, &buf[off]);

		residue -= l;
		off += l;
	}
	bfa_flash_sem_put(pci_bar);

	return BFA_STATUS_OK;
}

#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */

static enum bfa_status
bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
			      u32 *fwimg)
{
	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
}

static enum bfi_ioc_img_ver_cmp
bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
			struct bfi_ioc_image_hdr *base_fwhdr)
{
	struct bfi_ioc_image_hdr *flash_fwhdr;
	enum bfa_status status;
	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];

	status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
	if (status != BFA_STATUS_OK)
		return BFI_IOC_IMG_VER_INCOMP;

	flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
	if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
	else
		return BFI_IOC_IMG_VER_INCOMP;
}

/*
 * Returns TRUE if driver is willing to work with current smem f/w version.
 */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;

	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	/* If smem is incompatible or old, driver should not work with it. */
	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
	    drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
		return false;
	}

	/* If flash has better f/w than smem, do not work with smem.
	 * If smem f/w is the same as flash f/w, work with it (smem f/w is
	 * already known to be neither old nor incompatible here).
	 * If flash is old or incompatible, work with smem iff
	 * smem f/w == drv f/w.
	 */
	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);

	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
		return false;
	else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
		return true;
	else
		return drv_smem_cmp == BFI_IOC_IMG_VER_SAME;
}

/* Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr fwhdr;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (swab32(fwhdr.bootenv) != boot_env)
		return false;

	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}

/* Conditionally flush any pending message from firmware at start. */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;
	u32 boot_env;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	boot_env = BFI_FWBOOT_ENV_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
		    BFA_STATUS_OK)
			bfa_ioc_poll_fwinit(ioc);

		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
	    BFA_STATUS_OK)
		bfa_ioc_poll_fwinit(ioc);
}

void
bfa_nw_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
		       ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}

static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = htons(ioc->clscode);
	do_gettimeofday(&tv);
	enable_req.tv_sec = ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

void
bfa_nw_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc *ioc = cbarg;
	u32 hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	}
	ioc->hb_count = hb_count;

	bfa_ioc_mbox_poll(ioc);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}
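
/*
 * The download path below copies the firmware image into SMEM one word
 * at a time through the paged window described earlier, fetching the
 * image a BFI_FLASH_CHUNK_SZ chunk at a time either from the adapter
 * flash (BFI_FWBOOT_TYPE_FLASH) or from the driver's built-in image.
 * The boot type, boot env and ASIC mode words are written last, at the
 * fixed BFI_FWBOOT_*_OFF offsets in SMEM.
 */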

/* Initiate a full firmware download. */
static enum bfa_status
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;
	u32 fwimg_size;
	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
	enum bfa_status status;

	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
		fwimg_size = BFI_FLASH_IMAGE_SZ / sizeof(u32);

		status = bfa_nw_ioc_flash_img_get_chnk(ioc,
			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
		if (status != BFA_STATUS_OK)
			return status;

		fwimg = fwimg_buf;
	} else {
		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
	}

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < fwimg_size; i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			if (boot_env == BFI_FWBOOT_ENV_OS &&
			    boot_type == BFI_FWBOOT_TYPE_FLASH) {
				status = bfa_nw_ioc_flash_img_get_chnk(ioc,
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
					fwimg_buf);
				if (status != BFA_STATUS_OK)
					return status;

				fwimg = fwimg_buf;
			} else {
				fwimg = bfa_cb_image_get_chunk(
					bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
			}
		}

		/*
		 * write smem
		 */
		writel(swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]),
		       ioc->ioc_regs.smem_page_start + loff);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum,
			       ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(bfa_ioc_smem_pgnum(ioc, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	 */
	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
		boot_type = BFI_FWBOOT_TYPE_NORMAL;
	}
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				      ioc->port0_mode, ioc->port1_mode);
	writel(asicmode, ioc->ioc_regs.smem_page_start
	       + BFI_FWBOOT_DEVMODE_OFF);
	writel(boot_type, ioc->ioc_regs.smem_page_start
	       + BFI_FWBOOT_TYPE_OFF);
	writel(boot_env, ioc->ioc_regs.smem_page_start
	       + BFI_FWBOOT_ENV_OFF);
	return BFA_STATUS_OK;
}

static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}

/* BFA ioc enable reply by firmware */
static void
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
		     u8 cap_bm)
{
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	ioc->port_mode = ioc->port_mode_cfg = port_mode;
	ioc->ad_cap_bm = cap_bm;
	bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
}

/* Update BFA configuration from firmware configuration. */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	attr->adapter_prop = ntohl(attr->adapter_prop);
	attr->card_type = ntohl(attr->card_type);
	attr->maxfrsize = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
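
/*
 * Host-to-firmware mailbox protocol used by the mbox code below: a
 * message is written into the hfn_mbox registers and a 1 is written to
 * hfn_mbox_cmd to raise the LPU event. That register reads back non-zero
 * until the firmware has fetched the message, so only one command is in
 * flight at a time; queued commands are drained from cmd_q as the
 * register clears (see bfa_ioc_mbox_poll()).
 */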
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	int mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/* Mbox poll timer -- restarts any pending mailbox requests. */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;
	bfa_mbox_cmd_cbfn_t cbfn;
	void *cbarg;
	u32 stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	/*
	 * Give a callback to the client, indicating that the command is sent
	 */
	if (cmd->cbfn) {
		cbfn = cmd->cbfn;
		cbarg = cmd->cbarg;
		cmd->cbfn = NULL;
		cbfn(cbarg);
	}
}

/* Cleanup any pending requests. */
static void
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

/**
 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
 *
 * @ioc: memory for IOC
 * @tbuf: app memory to store data from smem
 * @soff: smem offset
 * @sz: size of smem in bytes
 */
static int
bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff, r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
		return 1;

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz / sizeof(u32);
	for (i = 0; i < len; i++) {
		r32 = swab32(readl(loff + ioc->ioc_regs.smem_page_start));
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	return 0;
}

/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
	u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
	int tlen, status = 0;

	tlen = *trclen;
	if (tlen > BNA_DBG_FWTRC_LEN)
		tlen = BNA_DBG_FWTRC_LEN;

	status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
	*trclen = tlen;
	return status;
}

/* Save firmware trace if configured.
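 *
 * The trace saved here can later be fetched with
 * bfa_nw_ioc_debug_fwsave(); a sketch of a consumer ('buf' is a
 * caller-provided buffer of at least BNA_DBG_FWTRC_LEN bytes):
 *
 *	int len = BNA_DBG_FWTRC_LEN;
 *
 *	if (bfa_nw_ioc_debug_fwsave(ioc, buf, &len) == BFA_STATUS_OK)
 *		... 'len' now holds the number of bytes copied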
 */
static void
bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
{
	int tlen;

	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = 0;
		if (ioc->dbg_fwsave_len) {
			tlen = ioc->dbg_fwsave_len;
			bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
		}
	}
}

/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
	int tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}

static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
	bfa_nw_ioc_debug_save_ftrc(ioc);
}

/* IOCPF to IOC interface */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}

static void
bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}

static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}

static void
bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
}

static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
{
	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}

/* IOC public */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = true;

	/* Initialize LMEM */
	bfa_ioc_lmem_init(ioc);

	/*
	 * release semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}

/* Boot the firmware. Also used by the diag module to do firmware boot
 * with memory test as the entry vector.
 */
static enum bfa_status
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
	     u32 boot_env)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	enum bfa_status status;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return BFA_STATUS_FAILED;
	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_NORMAL) {
		drv_fwhdr = (struct bfi_ioc_image_hdr *)
			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
		/* Boot from flash only if the flash f/w is better than
		 * the driver's bundled f/w; otherwise push the driver's
		 * firmware.
		 */
		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
			BFI_IOC_IMG_VER_BETTER)
			boot_type = BFI_FWBOOT_TYPE_FLASH;
	}

	/*
	 * Initialize IOC state of all functions on a chip reset.
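	 * Both the current and the alternate (partner function) fwstate
	 * registers are programmed, so whichever function polls next
	 * sees a consistent view of the reset.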
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
	} else {
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
	}

	bfa_ioc_msgflush(ioc);
	status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
	if (status == BFA_STATUS_OK)
		bfa_ioc_lpu_start(ioc);
	else
		bfa_nw_iocpf_timeout(ioc);

	return status;
}

/* Enable/disable IOC failure auto recovery. */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
	bfa_nw_auto_recover = auto_recover;
}

static bool
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
	u32 *msgp = mbmsg;
	u32 r32;
	int i;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if ((r32 & 1) == 0)
		return false;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			    i * sizeof(u32));
		msgp[i] = htonl(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);

	return true;
}

static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
	union bfi_ioc_i2h_msg_u *msg;
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_ioc_enable_reply(ioc,
			(enum bfa_mode)msg->fw_event.port_mode,
			msg->fw_event.cap_bm);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		BUG_ON(1);
	}
}

/**
 * bfa_nw_ioc_attach - IOC attach time initialization and setup.
 *
 * @ioc: memory for IOC
 * @bfa: driver instance structure
 * @cbfn: IOC event callback functions
 */
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->fcmode = false;
	ioc->pllinit = false;
	ioc->dbg_fwsave_once = true;
	ioc->iocpf.ioc = ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}

/* Driver detach time IOC cleanup. */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);

	/* Done with detach, empty the notify_q. */
	INIT_LIST_HEAD(&ioc->notify_q);
}

/**
 * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
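 * @ioc: IOC instance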
2526 * 2527 * @pcidev: PCI device information for this IOC 2528 */ 2529 void 2530 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, 2531 enum bfi_pcifn_class clscode) 2532 { 2533 ioc->clscode = clscode; 2534 ioc->pcidev = *pcidev; 2535 2536 /** 2537 * Initialize IOC and device personality 2538 */ 2539 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC; 2540 ioc->asic_mode = BFI_ASIC_MODE_FC; 2541 2542 switch (pcidev->device_id) { 2543 case PCI_DEVICE_ID_BROCADE_CT: 2544 ioc->asic_gen = BFI_ASIC_GEN_CT; 2545 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; 2546 ioc->asic_mode = BFI_ASIC_MODE_ETH; 2547 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA; 2548 ioc->ad_cap_bm = BFA_CM_CNA; 2549 break; 2550 2551 case BFA_PCI_DEVICE_ID_CT2: 2552 ioc->asic_gen = BFI_ASIC_GEN_CT2; 2553 if (clscode == BFI_PCIFN_CLASS_FC && 2554 pcidev->ssid == BFA_PCI_CT2_SSID_FC) { 2555 ioc->asic_mode = BFI_ASIC_MODE_FC16; 2556 ioc->fcmode = true; 2557 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; 2558 ioc->ad_cap_bm = BFA_CM_HBA; 2559 } else { 2560 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; 2561 ioc->asic_mode = BFI_ASIC_MODE_ETH; 2562 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) { 2563 ioc->port_mode = 2564 ioc->port_mode_cfg = BFA_MODE_CNA; 2565 ioc->ad_cap_bm = BFA_CM_CNA; 2566 } else { 2567 ioc->port_mode = 2568 ioc->port_mode_cfg = BFA_MODE_NIC; 2569 ioc->ad_cap_bm = BFA_CM_NIC; 2570 } 2571 } 2572 break; 2573 2574 default: 2575 BUG_ON(1); 2576 } 2577 2578 /** 2579 * Set asic specific interfaces. 2580 */ 2581 if (ioc->asic_gen == BFI_ASIC_GEN_CT) 2582 bfa_nw_ioc_set_ct_hwif(ioc); 2583 else { 2584 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2); 2585 bfa_nw_ioc_set_ct2_hwif(ioc); 2586 bfa_nw_ioc_ct2_poweron(ioc); 2587 } 2588 2589 bfa_ioc_map_port(ioc); 2590 bfa_ioc_reg_init(ioc); 2591 } 2592 2593 /** 2594 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory 2595 * 2596 * @dm_kva: kernel virtual address of IOC dma memory 2597 * @dm_pa: physical address of IOC dma memory 2598 */ 2599 void 2600 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) 2601 { 2602 /** 2603 * dma memory for firmware attribute 2604 */ 2605 ioc->attr_dma.kva = dm_kva; 2606 ioc->attr_dma.pa = dm_pa; 2607 ioc->attr = (struct bfi_ioc_attr *) dm_kva; 2608 } 2609 2610 /* Return size of dma memory required. */ 2611 u32 2612 bfa_nw_ioc_meminfo(void) 2613 { 2614 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ); 2615 } 2616 2617 void 2618 bfa_nw_ioc_enable(struct bfa_ioc *ioc) 2619 { 2620 bfa_ioc_stats(ioc, ioc_enables); 2621 ioc->dbg_fwsave_once = true; 2622 2623 bfa_fsm_send_event(ioc, IOC_E_ENABLE); 2624 } 2625 2626 void 2627 bfa_nw_ioc_disable(struct bfa_ioc *ioc) 2628 { 2629 bfa_ioc_stats(ioc, ioc_disables); 2630 bfa_fsm_send_event(ioc, IOC_E_DISABLE); 2631 } 2632 2633 /* Initialize memory for saving firmware trace. */ 2634 void 2635 bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave) 2636 { 2637 ioc->dbg_fwsave = dbg_fwsave; 2638 ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? 
			BNA_DBG_FWTRC_LEN : 0;
}

static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

/* Register mailbox message handler function, to be called by common modules */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		       bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}

/**
 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
 *
 * @ioc: IOC instance
 * @cmd: Mailbox command
 * @cbfn: callback invoked when the command has been sent (may be NULL)
 * @cbarg: argument for @cbfn
 *
 * If the mailbox is busy the command is queued and sent later from the
 * poll timer; the caller is responsible for serializing access.
 * Returns true if the command was queued, false if it was handed to
 * firmware immediately.
 */
bool
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
		      bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	u32 stat;

	cmd->cbfn = cbfn;
	cmd->cbarg = cbarg;

	/*
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return true;
	}

	/*
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return true;
	}

	/*
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	return false;
}

/* Handle mailbox interrupts */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfi_mbmsg m;
	int mc;

	if (bfa_ioc_msgget(ioc, &m)) {
		/*
		 * Treat IOC message class as special.
		 */
		mc = m.mh.msg_class;
		if (mc == BFI_MC_IOC) {
			bfa_ioc_isr(ioc, &m);
			return;
		}

		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
			return;

		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
	}

	bfa_ioc_lpu_read_stat(ioc);

	/*
	 * Try to send pending mailbox commands
	 */
	bfa_ioc_mbox_poll(ioc);
}

void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

/* return true if IOC is disabled */
bool
bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/* return true if IOC is operational */
bool
bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

/* Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
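 *
 * A typical registration, mirroring what the flash module does below
 * (sketch; 'mod' and 'mod_notify_cbfn' are placeholder names, the
 * callback runs on BFA_IOC_E_* events):
 *
 *	bfa_ioc_notify_init(&mod->ioc_notify, mod_notify_cbfn, mod);
 *	bfa_nw_ioc_notify_register(ioc, &mod->ioc_notify);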
2758 */ 2759 void 2760 bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, 2761 struct bfa_ioc_notify *notify) 2762 { 2763 list_add_tail(¬ify->qe, &ioc->notify_q); 2764 } 2765 2766 #define BFA_MFG_NAME "Brocade" 2767 static void 2768 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc, 2769 struct bfa_adapter_attr *ad_attr) 2770 { 2771 struct bfi_ioc_attr *ioc_attr; 2772 2773 ioc_attr = ioc->attr; 2774 2775 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); 2776 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); 2777 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); 2778 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); 2779 memcpy(&ad_attr->vpd, &ioc_attr->vpd, 2780 sizeof(struct bfa_mfg_vpd)); 2781 2782 ad_attr->nports = bfa_ioc_get_nports(ioc); 2783 ad_attr->max_speed = bfa_ioc_speed_sup(ioc); 2784 2785 bfa_ioc_get_adapter_model(ioc, ad_attr->model); 2786 /* For now, model descr uses same model string */ 2787 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); 2788 2789 ad_attr->card_type = ioc_attr->card_type; 2790 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type); 2791 2792 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) 2793 ad_attr->prototype = 1; 2794 else 2795 ad_attr->prototype = 0; 2796 2797 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); 2798 ad_attr->mac = bfa_nw_ioc_get_mac(ioc); 2799 2800 ad_attr->pcie_gen = ioc_attr->pcie_gen; 2801 ad_attr->pcie_lanes = ioc_attr->pcie_lanes; 2802 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; 2803 ad_attr->asic_rev = ioc_attr->asic_rev; 2804 2805 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); 2806 } 2807 2808 static enum bfa_ioc_type 2809 bfa_ioc_get_type(struct bfa_ioc *ioc) 2810 { 2811 if (ioc->clscode == BFI_PCIFN_CLASS_ETH) 2812 return BFA_IOC_TYPE_LL; 2813 2814 BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC)); 2815 2816 return (ioc->attr->port_mode == BFI_PORT_MODE_FC) 2817 ? 
		BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}

static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
	memcpy(serial_num,
	       (void *)ioc->attr->brcd_serialnum,
	       BFA_ADAPTER_SERIAL_NUM_LEN);
}

static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

static void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
{
	BUG_ON(!chip_rev);

	memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memcpy(optrom_ver, ioc->attr->optrom_version,
	       BFA_VERSION_LEN);
}

static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	BUG_ON(!model);
	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		 BFA_MFG_NAME, ioc_attr->card_type);
}

static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}
	return ioc_st;
}

void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
	memset(ioc_attr, 0, sizeof(struct bfa_ioc_attr));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = bfa_ioc_portid(ioc);
	ioc_attr->port_mode = ioc->port_mode;

	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
	ioc_attr->cap_bm = ioc->ad_cap_bm;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
	ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
	ioc_attr->def_fn = bfa_ioc_is_default(ioc);
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

/* WWN public */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}

mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
{
	return ioc->attr->mac;
}

/* Firmware failure detected. Start recovery actions.
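 *
 * Reached from bfa_nw_ioc_hb_check() when the heartbeat counter stops
 * advancing. IOC_E_HBFAIL drives the IOC state machine through failure
 * notification and, if auto-recovery is enabled, re-initialization.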
 */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	pr_crit("Heartbeat of IOC has failed\n");
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

/* BFA IOC PF private functions */

static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}

static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}

static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}

static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}

static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}

void
bfa_nw_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
	enum bfa_iocpf_state iocpf_st;

	iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

	if (iocpf_st == BFA_IOCPF_HWINIT)
		bfa_ioc_poll_fwinit(ioc);
	else
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

void
bfa_nw_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
		bfa_nw_iocpf_timeout(ioc);
	} else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		mod_timer(&ioc->iocpf_timer, jiffies +
			  msecs_to_jiffies(BFA_IOC_POLL_TOV));
	}
}

/*
 * Flash module specific
 */

/*
 * The flash DMA buffer must be big enough to hold both the MFG block
 * and the ASIC block (64k) at the same time, and must be 2k aligned so
 * that a write segment does not cross a sector boundary.
 */
#define BFA_FLASH_SEG_SZ	2048
#define BFA_FLASH_DMA_BUF_SZ	\
	roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)

static void
bfa_flash_cb(struct bfa_flash *flash)
{
	flash->op_busy = 0;
	if (flash->cbfn)
		flash->cbfn(flash->cbarg, flash->status);
}

static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_flash *flash = cbarg;

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (flash->op_busy) {
			flash->status = BFA_STATUS_IOC_FAILURE;
			flash->cbfn(flash->cbarg, flash->status);
			flash->op_busy = 0;
		}
		break;
	default:
		break;
	}
}

/*
 * Send flash write request.
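 *
 * Large updates are segmented: at most BFA_FLASH_DMA_BUF_SZ bytes go
 * out per request, with 'residue'/'offset' tracking progress. As an
 * illustration, with a 67584-byte DMA buffer a 100000-byte image is
 * sent as 67584 + 32416 bytes, the second message carrying last = 1.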
 */
static void
bfa_flash_write_send(struct bfa_flash *flash)
{
	struct bfi_flash_write_req *msg =
			(struct bfi_flash_write_req *) flash->mb.msg;
	u32 len;

	msg->type = cpu_to_be32(flash->type);
	msg->instance = flash->instance;
	msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
	       flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = cpu_to_be32(len);

	/* indicate if it's the last msg of the whole write operation */
	msg->last = (len == flash->residue) ? 1 : 0;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);

	flash->residue -= len;
	flash->offset += len;
}

/**
 * bfa_flash_read_send - Send flash read request.
 *
 * @cbarg: callback argument
 */
static void
bfa_flash_read_send(void *cbarg)
{
	struct bfa_flash *flash = cbarg;
	struct bfi_flash_read_req *msg =
			(struct bfi_flash_read_req *) flash->mb.msg;
	u32 len;

	msg->type = cpu_to_be32(flash->type);
	msg->instance = flash->instance;
	msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
	       flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = cpu_to_be32(len);
	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
}

/**
 * bfa_flash_intr - Process flash response messages upon receiving interrupts.
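 *
 * Query responses are byte-swapped into the caller's attribute buffer;
 * write and read responses either complete the operation via
 * bfa_flash_cb() or kick off the next segment.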
3136 * 3137 * @flasharg: flash structure 3138 * @msg: message structure 3139 */ 3140 static void 3141 bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg) 3142 { 3143 struct bfa_flash *flash = flasharg; 3144 u32 status; 3145 3146 union { 3147 struct bfi_flash_query_rsp *query; 3148 struct bfi_flash_write_rsp *write; 3149 struct bfi_flash_read_rsp *read; 3150 struct bfi_mbmsg *msg; 3151 } m; 3152 3153 m.msg = msg; 3154 3155 /* receiving response after ioc failure */ 3156 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) 3157 return; 3158 3159 switch (msg->mh.msg_id) { 3160 case BFI_FLASH_I2H_QUERY_RSP: 3161 status = be32_to_cpu(m.query->status); 3162 if (status == BFA_STATUS_OK) { 3163 u32 i; 3164 struct bfa_flash_attr *attr, *f; 3165 3166 attr = (struct bfa_flash_attr *) flash->ubuf; 3167 f = (struct bfa_flash_attr *) flash->dbuf_kva; 3168 attr->status = be32_to_cpu(f->status); 3169 attr->npart = be32_to_cpu(f->npart); 3170 for (i = 0; i < attr->npart; i++) { 3171 attr->part[i].part_type = 3172 be32_to_cpu(f->part[i].part_type); 3173 attr->part[i].part_instance = 3174 be32_to_cpu(f->part[i].part_instance); 3175 attr->part[i].part_off = 3176 be32_to_cpu(f->part[i].part_off); 3177 attr->part[i].part_size = 3178 be32_to_cpu(f->part[i].part_size); 3179 attr->part[i].part_len = 3180 be32_to_cpu(f->part[i].part_len); 3181 attr->part[i].part_status = 3182 be32_to_cpu(f->part[i].part_status); 3183 } 3184 } 3185 flash->status = status; 3186 bfa_flash_cb(flash); 3187 break; 3188 case BFI_FLASH_I2H_WRITE_RSP: 3189 status = be32_to_cpu(m.write->status); 3190 if (status != BFA_STATUS_OK || flash->residue == 0) { 3191 flash->status = status; 3192 bfa_flash_cb(flash); 3193 } else 3194 bfa_flash_write_send(flash); 3195 break; 3196 case BFI_FLASH_I2H_READ_RSP: 3197 status = be32_to_cpu(m.read->status); 3198 if (status != BFA_STATUS_OK) { 3199 flash->status = status; 3200 bfa_flash_cb(flash); 3201 } else { 3202 u32 len = be32_to_cpu(m.read->length); 3203 memcpy(flash->ubuf + flash->offset, 3204 flash->dbuf_kva, len); 3205 flash->residue -= len; 3206 flash->offset += len; 3207 if (flash->residue == 0) { 3208 flash->status = status; 3209 bfa_flash_cb(flash); 3210 } else 3211 bfa_flash_read_send(flash); 3212 } 3213 break; 3214 case BFI_FLASH_I2H_BOOT_VER_RSP: 3215 case BFI_FLASH_I2H_EVENT: 3216 break; 3217 default: 3218 WARN_ON(1); 3219 } 3220 } 3221 3222 /* 3223 * Flash memory info API. 3224 */ 3225 u32 3226 bfa_nw_flash_meminfo(void) 3227 { 3228 return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 3229 } 3230 3231 /** 3232 * bfa_nw_flash_attach - Flash attach API. 
3233 * 3234 * @flash: flash structure 3235 * @ioc: ioc structure 3236 * @dev: device structure 3237 */ 3238 void 3239 bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev) 3240 { 3241 flash->ioc = ioc; 3242 flash->cbfn = NULL; 3243 flash->cbarg = NULL; 3244 flash->op_busy = 0; 3245 3246 bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash); 3247 bfa_q_qe_init(&flash->ioc_notify); 3248 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash); 3249 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); 3250 } 3251 3252 /** 3253 * bfa_nw_flash_memclaim - Claim memory for flash 3254 * 3255 * @flash: flash structure 3256 * @dm_kva: pointer to virtual memory address 3257 * @dm_pa: physical memory address 3258 */ 3259 void 3260 bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa) 3261 { 3262 flash->dbuf_kva = dm_kva; 3263 flash->dbuf_pa = dm_pa; 3264 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ); 3265 dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 3266 dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 3267 } 3268 3269 /** 3270 * bfa_nw_flash_get_attr - Get flash attribute. 3271 * 3272 * @flash: flash structure 3273 * @attr: flash attribute structure 3274 * @cbfn: callback function 3275 * @cbarg: callback argument 3276 * 3277 * Return status. 3278 */ 3279 enum bfa_status 3280 bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr, 3281 bfa_cb_flash cbfn, void *cbarg) 3282 { 3283 struct bfi_flash_query_req *msg = 3284 (struct bfi_flash_query_req *) flash->mb.msg; 3285 3286 if (!bfa_nw_ioc_is_operational(flash->ioc)) 3287 return BFA_STATUS_IOC_NON_OP; 3288 3289 if (flash->op_busy) 3290 return BFA_STATUS_DEVBUSY; 3291 3292 flash->op_busy = 1; 3293 flash->cbfn = cbfn; 3294 flash->cbarg = cbarg; 3295 flash->ubuf = (u8 *) attr; 3296 3297 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ, 3298 bfa_ioc_portid(flash->ioc)); 3299 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa); 3300 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); 3301 3302 return BFA_STATUS_OK; 3303 } 3304 3305 /** 3306 * bfa_nw_flash_update_part - Update flash partition. 3307 * 3308 * @flash: flash structure 3309 * @type: flash partition type 3310 * @instance: flash partition instance 3311 * @buf: update data buffer 3312 * @len: data buffer length 3313 * @offset: offset relative to the partition starting address 3314 * @cbfn: callback function 3315 * @cbarg: callback argument 3316 * 3317 * Return status. 3318 */ 3319 enum bfa_status 3320 bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance, 3321 void *buf, u32 len, u32 offset, 3322 bfa_cb_flash cbfn, void *cbarg) 3323 { 3324 if (!bfa_nw_ioc_is_operational(flash->ioc)) 3325 return BFA_STATUS_IOC_NON_OP; 3326 3327 /* 3328 * 'len' must be in word (4-byte) boundary 3329 */ 3330 if (!len || (len & 0x03)) 3331 return BFA_STATUS_FLASH_BAD_LEN; 3332 3333 if (type == BFA_FLASH_PART_MFG) 3334 return BFA_STATUS_EINVAL; 3335 3336 if (flash->op_busy) 3337 return BFA_STATUS_DEVBUSY; 3338 3339 flash->op_busy = 1; 3340 flash->cbfn = cbfn; 3341 flash->cbarg = cbarg; 3342 flash->type = type; 3343 flash->instance = instance; 3344 flash->residue = len; 3345 flash->offset = 0; 3346 flash->addr_off = offset; 3347 flash->ubuf = buf; 3348 3349 bfa_flash_write_send(flash); 3350 3351 return BFA_STATUS_OK; 3352 } 3353 3354 /** 3355 * bfa_nw_flash_read_part - Read flash partition. 
3356 * 3357 * @flash: flash structure 3358 * @type: flash partition type 3359 * @instance: flash partition instance 3360 * @buf: read data buffer 3361 * @len: data buffer length 3362 * @offset: offset relative to the partition starting address 3363 * @cbfn: callback function 3364 * @cbarg: callback argument 3365 * 3366 * Return status. 3367 */ 3368 enum bfa_status 3369 bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance, 3370 void *buf, u32 len, u32 offset, 3371 bfa_cb_flash cbfn, void *cbarg) 3372 { 3373 if (!bfa_nw_ioc_is_operational(flash->ioc)) 3374 return BFA_STATUS_IOC_NON_OP; 3375 3376 /* 3377 * 'len' must be in word (4-byte) boundary 3378 */ 3379 if (!len || (len & 0x03)) 3380 return BFA_STATUS_FLASH_BAD_LEN; 3381 3382 if (flash->op_busy) 3383 return BFA_STATUS_DEVBUSY; 3384 3385 flash->op_busy = 1; 3386 flash->cbfn = cbfn; 3387 flash->cbarg = cbarg; 3388 flash->type = type; 3389 flash->instance = instance; 3390 flash->residue = len; 3391 flash->offset = 0; 3392 flash->addr_off = offset; 3393 flash->ubuf = buf; 3394 3395 bfa_flash_read_send(flash); 3396 3397 return BFA_STATUS_OK; 3398 } 3399