// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

/* IOC local definitions */

/* ASIC specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)		\
			((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
			((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
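
/*
 * Note: each macro above simply dispatches through the per-ASIC ops
 * table, roughly:
 *
 *	ioc->ioc_hwif->ioc_sync_join(ioc);	(ct or cb implementation)
 *
 * The sync_start/join/leave/ack/complete ops implement the
 * cross-function init handshake; see bfa_hw_cb.c and bfa_hw_ct.c.
 */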

static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
			enum bfi_fwboot_type boot_type, u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
						char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
						char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
						char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
						char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
						char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);

/* IOC state machine definitions/declarations */
enum ioc_event {
	IOC_E_RESET		= 1,	/*!< IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*!< IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*!< IOC disable request	*/
	IOC_E_DETACH		= 4,	/*!< driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*!< failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*!< heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*!< hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*!< timeout			*/
	IOC_E_HWFAILED		= 12,	/*!< PCI mapping failure notice	*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);

static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);

/* IOCPF state machine events */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*!< IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*!< IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*!< stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*!< f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*!< enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*!< disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*!< failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*!< getattr fail notice by ioc sm */
	IOCPF_E_SEMLOCKED	= 10,	/*!< h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*!< f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*!< h/w sem mapping error	*/
};
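
/*
 * Reading aid: the IOC state machine above tracks the logical function
 * state seen by the driver, while the IOCPF machine below tracks the
 * physical function's handshake with firmware (semaphore, fwcheck,
 * hwinit, enable). Events cross between the two only through the small
 * bfa_iocpf_*() and bfa_ioc_pf_*() helpers further down in this file.
 */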

/* IOCPF states */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*!< IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/*!< Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*!< IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*!< IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/*!< IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/*!< IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/*!< IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/*!< IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/*!< IOC f/w different from driver's */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
		   enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
		   enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);

static struct bfa_sm_table iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/* IOC State Machine */

/* Beginning state. IOC uninit state. */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}

/* IOC is in uninit state. */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
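
/*
 * A minimal sketch of how these handlers are driven (using the bfa_fsm
 * helpers this file already relies on): the driver sets an initial
 * state and then feeds events, e.g.
 *
 *	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 *	bfa_fsm_send_event(ioc, IOC_E_RESET);	(moves to bfa_ioc_sm_reset)
 *
 * Events a state does not list fall through to bfa_sm_fault().
 */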

/* Reset entry actions -- initialize state machine */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/* IOC is in reset state. */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_enable(ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Send the get-attribute request to firmware, guarded by a timeout timer. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	mod_timer(&ioc->ioc_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_getattr(ioc);
}

/* IOC configuration in progress. Timer is active. */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		del_timer(&ioc->ioc_timer);
		/* fall through */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_getattrfail(ioc);
		break;

	case IOC_E_DISABLE:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}
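
/*
 * Operational state: entry reports enable completion to the driver,
 * notifies registered modules (BFA_IOC_E_ENABLED) and starts the
 * heartbeat monitor.
 */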

static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
}

static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_iocpf_fail(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_disable(ioc);
}

/* IOC is being disabled */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_iocpf_fail(ioc);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOC disable completion entry. */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}

/* Hardware initialization retry. */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/**
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}

/* IOC failure. */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_HWERROR:
		/* HB failure notification, ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
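
/*
 * hwfail differs from fail above: it is entered on PCI mapping or h/w
 * semaphore errors (IOC_E_HWFAILED), so no further IOCPF interaction
 * is attempted; the only exit is a driver detach.
 */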

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}

/* IOC hardware failure. */
static void
bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOCPF State Machine */

/* Reset entry actions -- initialize state machine */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
	iocpf->fw_mismatch_notified = false;
	iocpf->auto_recover = bfa_nw_auto_recover;
}

/* Beginning state. IOC is in reset state. */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Semaphore should be acquired for version check. */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_init(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Awaiting h/w semaphore to continue with version check. */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				bfa_nw_ioc_hw_sem_release(ioc);
				mod_timer(&ioc->sem_timer, jiffies +
					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
			}
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Notify enable completion callback */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
	/* Call only the first time sm enters fwmismatch state. */
	if (!iocpf->fw_mismatch_notified)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = true;
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
}

/* Awaiting firmware version match. */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
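
/*
 * The h/w semaphore acquired below serializes initialization between
 * the functions sharing the ASIC. On a version mismatch the IOCPF
 * simply re-runs fwcheck every BFA_IOC_TOV ms until the peer function
 * loads a matching image or the IOCPF is disabled/stopped.
 */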

/* Request for semaphore. */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Awaiting semaphore for h/w initialization. */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			mod_timer(&ioc->sem_timer, jiffies +
				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_reset(iocpf->ioc, false);
}

/* Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	/**
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		del_timer(&ioc->iocpf_timer);
		/* fall through */

	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_pf_enabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_disable(iocpf->ioc);
}

/* IOC is being disabled */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		del_timer(&ioc->iocpf_timer);
		/* fall through */

	case IOCPF_E_TIMEOUT:
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* IOC hb ack request is being removed. */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}
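
/*
 * All the *_sync states in this machine follow the same pattern:
 * reacquire the h/w semaphore, update the shared sync state through
 * the ioc_hwif sync ops so the peer function sees a consistent view,
 * then release the semaphore and complete the transition.
 */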

/* IOC disable completion entry. */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_pf_disabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				bfa_nw_ioc_hw_sem_release(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}

/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* BFA IOC private functions */

/* Notify common modules registered for notification. */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
	struct bfa_ioc_notify *notify;

	list_for_each_entry(notify, &ioc->notify_q, qe)
		notify->cbfn(notify->cbarg, event);
}

static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return true;

	return false;
}

void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	readl(sem_reg);
	writel(1, sem_reg);
}

/* Clear fwver hdr */
static void
bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
{
	u32 pgnum, pgoff, loff = 0;
	int i;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
		writel(0, ioc->ioc_regs.smem_page_start + loff);
		loff += sizeof(u32);
	}
}
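
/*
 * bfa_ioc_hw_sem_init() below cleans up after a previous owner of the
 * device: if firmware is past UNINIT but the running image is not a
 * normally booted one, the version header is scrubbed and both
 * fwstates are forced back to UNINIT so a fresh download happens.
 * Callers of the raw semaphore helpers above pair them in the obvious
 * way (see bfa_nw_ioc_smem_read() and bfa_ioc_pll_init() for real
 * callers), e.g.
 *
 *	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
 *		... critical section ...
 *		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
 *	}
 */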

static void
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr;
	u32 fwstate, r32;

	/* Spin on init semaphore to serialize. */
	r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	}

	fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_ioc_fwver_clear(ioc);
	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);

	/* Unlock init semaphore */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32 r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register.
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}

void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}

/* Initialize LPU local memory (aka secondary memory / SRAM) */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5KHz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/* Get the currently loaded firmware version from IOC smem. */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32 pgnum;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl(loff + ioc->ioc_regs.smem_page_start));
		loff += sizeof(u32);
	}
}

static bool
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
			struct bfi_ioc_image_hdr *fwhdr_2)
{
	int i;

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
			return false;
	}

	return true;
}

/* Returns TRUE if the signature and the major, minor and maintenance
 * versions are the same. If the patch, phase and build numbers are also
 * the same, the MD5 checksums must match as well.
 */
static bool
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
			  struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
		return false;
	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
		return false;
	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
		return false;
	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
		return false;
	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
	    drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
	    drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);

	return true;
}

static bool
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
{
	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
		return false;

	return true;
}

static bool
fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
{
	if (fwhdr->fwver.phase == 0 &&
	    fwhdr->fwver.build == 0)
		return false;

	return true;
}

/* Compares fwhdr_to_cmp against base_fwhdr and reports whether it is
 * incompatible, the same, older, or better (newer patch/phase/build).
 */
static enum bfi_ioc_img_ver_cmp
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
			 struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
	if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp))
		return BFI_IOC_IMG_VER_INCOMP;

	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_OLD;

	/* GA takes priority over internal builds of the same patch stream.
	 * At this point major, minor, maint and patch numbers are the same.
	 */
	if (fwhdr_is_ga(base_fwhdr))
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_SAME;
		else
			return BFI_IOC_IMG_VER_OLD;
	else
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_BETTER;

	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_OLD;

	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_OLD;

	/* All version numbers are equal.
	 * The MD5 check was already done as part of the compatibility check.
	 */
	return BFI_IOC_IMG_VER_SAME;
}
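
/*
 * Illustrative ordering (version numbers made up): with identical
 * signature/major/minor/maint fields, a 3.2.5.3 image beats a 3.2.5.1
 * image on the patch field alone; only when patch also ties do the
 * GA/phase/build tie-breaks above run. Any signature, major, minor or
 * maint mismatch short-circuits to BFI_IOC_IMG_VER_INCOMP.
 */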

/* register definitions */
#define FLI_CMD_REG			0x0001d000
#define FLI_WRDATA_REG			0x0001d00c
#define FLI_RDDATA_REG			0x0001d010
#define FLI_ADDR_REG			0x0001d004
#define FLI_DEV_STATUS_REG		0x0001d014

#define BFA_FLASH_FIFO_SIZE		128	/* fifo size */
#define BFA_FLASH_CHECK_MAX		10000	/* max # of status check */
#define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op check */
#define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */

#define NFC_STATE_RUNNING		0x20000001
#define NFC_STATE_PAUSED		0x00004560
#define NFC_VER_VALID			0x147

enum bfa_flash_cmd {
	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
	BFA_FLASH_WRITE_ENABLE	= 0x06,	/* write enable */
	BFA_FLASH_SECTOR_ERASE	= 0xd8,	/* sector erase */
	BFA_FLASH_WRITE		= 0x02,	/* write */
	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
};

/* hardware error definition */
enum bfa_flash_err {
	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */
	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */
	BFA_FLASH_BAD		= -3,	/*!< flash bad */
	BFA_FLASH_BUSY		= -4,	/*!< flash busy */
	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */
	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */
	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */
	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */
	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */
};

/* flash command register data structure */
union bfa_flash_cmd_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	act:1;
		u32	rsv:1;
		u32	write_cnt:9;
		u32	read_cnt:9;
		u32	addr_cnt:4;
		u32	cmd:8;
#else
		u32	cmd:8;
		u32	addr_cnt:4;
		u32	read_cnt:9;
		u32	write_cnt:9;
		u32	rsv:1;
		u32	act:1;
#endif
	} r;
	u32	i;
};

/* flash device status register data structure */
union bfa_flash_dev_status_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	rsv:21;
		u32	fifo_cnt:6;
		u32	busy:1;
		u32	init_status:1;
		u32	present:1;
		u32	bad:1;
		u32	good:1;
#else
		u32	good:1;
		u32	bad:1;
		u32	present:1;
		u32	init_status:1;
		u32	busy:1;
		u32	fifo_cnt:6;
		u32	rsv:21;
#endif
	} r;
	u32	i;
};

/* flash address register data structure */
union bfa_flash_addr_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	addr:24;
		u32	dummy:8;
#else
		u32	dummy:8;
		u32	addr:24;
#endif
	} r;
	u32	i;
};

/* Flash raw private functions */
static void
bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
		  u8 rd_cnt, u8 ad_cnt, u8 op)
{
	union bfa_flash_cmd_reg cmd;

	cmd.i = 0;
	cmd.r.act = 1;
	cmd.r.write_cnt = wr_cnt;
	cmd.r.read_cnt = rd_cnt;
	cmd.r.addr_cnt = ad_cnt;
	cmd.r.cmd = op;
	writel(cmd.i, (pci_bar + FLI_CMD_REG));
}

static void
bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
{
	union bfa_flash_addr_reg addr;

	addr.r.addr = address & 0x00ffffff;
	addr.r.dummy = 0;
	writel(addr.i, (pci_bar + FLI_ADDR_REG));
}

static int
bfa_flash_cmd_act_check(void __iomem *pci_bar)
{
	union bfa_flash_cmd_reg cmd;

	cmd.i = readl(pci_bar + FLI_CMD_REG);

	if (cmd.r.act)
		return BFA_FLASH_ERR_CMD_ACT;

	return 0;
}
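
/*
 * Example of the packing done by bfa_flash_set_cmd(): a status read
 * issued as
 *
 *	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
 *
 * writes cmd=0x05, addr_cnt=0, read_cnt=4, write_cnt=0 and act=1 to
 * FLI_CMD_REG, i.e. "execute opcode 0x05 and clock four bytes back
 * into the FIFO"; the controller clears the act bit on completion,
 * which is what bfa_flash_cmd_act_check() polls for.
 */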

/* Flush FLI data fifo. */
static int
bfa_flash_fifo_flush(void __iomem *pci_bar)
{
	u32 i;
	u32 t;
	union bfa_flash_dev_status_reg dev_status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);

	if (!dev_status.r.fifo_cnt)
		return 0;

	/* fifo counter in terms of words */
	for (i = 0; i < dev_status.r.fifo_cnt; i++)
		t = readl(pci_bar + FLI_RDDATA_REG);

	/* Check the device status. It may take some time. */
	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
		if (!dev_status.r.fifo_cnt)
			break;
	}

	if (dev_status.r.fifo_cnt)
		return BFA_FLASH_ERR_FIFO_CNT;

	return 0;
}

/* Read flash status. */
static int
bfa_flash_status_read(void __iomem *pci_bar)
{
	union bfa_flash_dev_status_reg dev_status;
	int status;
	u32 ret_status;
	int i;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);

	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		status = bfa_flash_cmd_act_check(pci_bar);
		if (!status)
			break;
	}

	if (status)
		return status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
	if (!dev_status.r.fifo_cnt)
		return BFA_FLASH_BUSY;

	ret_status = readl(pci_bar + FLI_RDDATA_REG);
	ret_status >>= 24;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	return ret_status;
}

/* Start flash read operation. */
static int
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
		     char *buf)
{
	int status;

	/* len must be a multiple of 4 and must not exceed the fifo size */
	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
		return BFA_FLASH_ERR_LEN;

	/* check status */
	status = bfa_flash_status_read(pci_bar);
	if (status == BFA_FLASH_BUSY)
		status = bfa_flash_status_read(pci_bar);

	if (status < 0)
		return status;

	/* check if write-in-progress bit is cleared */
	if (status & BFA_FLASH_WIP_MASK)
		return BFA_FLASH_ERR_WIP;

	bfa_flash_set_addr(pci_bar, offset);

	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);

	return 0;
}

/* Check flash read operation. */
static u32
bfa_flash_read_check(void __iomem *pci_bar)
{
	if (bfa_flash_cmd_act_check(pci_bar))
		return 1;

	return 0;
}

/* End flash read operation. */
static void
bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
{
	u32 i;

	/* read data fifo up to 32 words */
	for (i = 0; i < len; i += 4) {
		u32 w = readl(pci_bar + FLI_RDDATA_REG);
		*((u32 *)(buf + i)) = swab32(w);
	}

	bfa_flash_fifo_flush(pci_bar);
}
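
/*
 * The opcodes in enum bfa_flash_cmd mirror the de-facto standard SPI
 * NOR command set (0x0b fast read, 0x05 read status, 0x06 write
 * enable, ...), so a raw read is the usual three-phase sequence:
 * read_start() arms the controller, read_check() polls for
 * completion, read_end() drains the FIFO. bfa_flash_raw_read() below
 * drives that sequence one FIFO-sized (128 byte) slice at a time.
 */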

/* Perform flash raw read. */

#define FLASH_BLOCKING_OP_MAX	500
#define FLASH_SEM_LOCK_REG	0x18820

static int
bfa_raw_sem_get(void __iomem *bar)
{
	int locked;

	locked = readl(bar + FLASH_SEM_LOCK_REG);

	return !locked;
}

static enum bfa_status
bfa_flash_sem_get(void __iomem *bar)
{
	u32 n = FLASH_BLOCKING_OP_MAX;

	while (!bfa_raw_sem_get(bar)) {
		if (--n <= 0)
			return BFA_STATUS_BADFLASH;
		mdelay(10);
	}
	return BFA_STATUS_OK;
}

static void
bfa_flash_sem_put(void __iomem *bar)
{
	writel(0, (bar + FLASH_SEM_LOCK_REG));
}

static enum bfa_status
bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
		   u32 len)
{
	u32 n;
	int status;
	u32 off, l, s, residue, fifo_sz;

	residue = len;
	off = 0;
	fifo_sz = BFA_FLASH_FIFO_SIZE;
	status = bfa_flash_sem_get(pci_bar);
	if (status != BFA_STATUS_OK)
		return status;

	while (residue) {
		s = offset + off;
		n = s / fifo_sz;
		/* length of this slice: up to the next fifo-size boundary */
		l = (n + 1) * fifo_sz - s;
		if (l > residue)
			l = residue;

		status = bfa_flash_read_start(pci_bar, offset + off, l,
					      &buf[off]);
		if (status < 0) {
			bfa_flash_sem_put(pci_bar);
			return BFA_STATUS_FAILED;
		}

		n = BFA_FLASH_BLOCKING_OP_MAX;
		while (bfa_flash_read_check(pci_bar)) {
			if (--n <= 0) {
				bfa_flash_sem_put(pci_bar);
				return BFA_STATUS_FAILED;
			}
		}

		bfa_flash_read_end(pci_bar, l, &buf[off]);

		residue -= l;
		off += l;
	}
	bfa_flash_sem_put(pci_bar);

	return BFA_STATUS_OK;
}

#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */

static enum bfa_status
bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
			      u32 *fwimg)
{
	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
}

static enum bfi_ioc_img_ver_cmp
bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
			struct bfi_ioc_image_hdr *base_fwhdr)
{
	struct bfi_ioc_image_hdr *flash_fwhdr;
	enum bfa_status status;
	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];

	status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
	if (status != BFA_STATUS_OK)
		return BFI_IOC_IMG_VER_INCOMP;

	flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
	if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
	else
		return BFI_IOC_IMG_VER_INCOMP;
}
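
/*
 * Firmware selection, summarized: there are up to three candidate
 * images (driver-embedded, flash, and whatever already runs in smem),
 * and roughly the running smem image is kept only when it is neither
 * older than nor incompatible with the other two; the corner cases
 * (e.g. flash image older or incompatible) additionally require smem
 * to exactly match the driver image. bfa_nw_ioc_fwver_cmp() below
 * encodes the details.
 */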

/**
 * Returns TRUE if driver is willing to work with current smem f/w version.
 */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;

	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	/* If smem is incompatible or old, driver should not work with it. */
	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
	    drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
		return false;
	}

	/* If flash has a better f/w than smem, do not work with smem.
	 * If smem f/w == flash f/w, work with it (smem f/w is neither old
	 * nor incompatible at this point).
	 * If flash is old or incompatible, work with smem iff
	 * smem f/w == driver f/w.
	 */
	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);

	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
		return false;
	else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
		return true;
	else
		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
			true : false;
}

/* Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr fwhdr;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (swab32(fwhdr.bootenv) != boot_env)
		return false;

	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}

/* Conditionally flush any pending message from firmware at start. */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
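
/*
 * bfa_ioc_hwinit() below decides between three paths based on the
 * current fwstate: (re)download and boot firmware when the running
 * image is absent or unusable, just poll for completion when the peer
 * function is already initializing, or flush the mailbox and reuse the
 * running image when it is disabled/operational with a matching
 * version.
 */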

static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;
	u32 boot_env;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	boot_env = BFI_FWBOOT_ENV_OS;

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
		    BFA_STATUS_OK)
			bfa_ioc_poll_fwinit(ioc);

		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
	    BFA_STATUS_OK)
		bfa_ioc_poll_fwinit(ioc);
}

void
bfa_nw_ioc_timeout(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
		       ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}

static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = htons(ioc->clscode);
	enable_req.rsvd = htons(0);
	/* overflow in 2106 */
	enable_req.tv_sec = ntohl(ktime_get_real_seconds());
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	disable_req.clscode = htons(ioc->clscode);
	disable_req.rsvd = htons(0);
	/* overflow in 2106 */
	disable_req.tv_sec = ntohl(ktime_get_real_seconds());
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

void
bfa_nw_ioc_hb_check(struct bfa_ioc *ioc)
{
	u32 hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}
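
/*
 * Heartbeat scheme, for reference: firmware increments the heartbeat
 * register continuously; bfa_nw_ioc_hb_check() runs off hb_timer every
 * BFA_IOC_HB_TOV ms and declares failure (bfa_ioc_recover()) only if
 * the counter did not move during the whole interval. The same tick
 * doubles as the retry point for queued mailbox commands via
 * bfa_ioc_mbox_poll().
 */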

/* Initiate a full firmware download. */
static enum bfa_status
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;
	u32 fwimg_size;
	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
	enum bfa_status status;

	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);

		status = bfa_nw_ioc_flash_img_get_chnk(ioc,
			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
		if (status != BFA_STATUS_OK)
			return status;

		fwimg = fwimg_buf;
	} else {
		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
			BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
	}

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < fwimg_size; i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			if (boot_env == BFI_FWBOOT_ENV_OS &&
			    boot_type == BFI_FWBOOT_TYPE_FLASH) {
				status = bfa_nw_ioc_flash_img_get_chnk(ioc,
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
					fwimg_buf);
				if (status != BFA_STATUS_OK)
					return status;

				fwimg = fwimg_buf;
			} else {
				fwimg = bfa_cb_image_get_chunk(
					bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
			}
		}

		/**
		 * write smem
		 */
		writel(swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]),
		       ioc->ioc_regs.smem_page_start + loff);

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum,
			       ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(bfa_ioc_smem_pgnum(ioc, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	 */
	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
		boot_type = BFI_FWBOOT_TYPE_NORMAL;
	}
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				      ioc->port0_mode, ioc->port1_mode);
	writel(asicmode, ((ioc->ioc_regs.smem_page_start)
			+ BFI_FWBOOT_DEVMODE_OFF));
	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_TYPE_OFF)));
	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_ENV_OFF)));
	return BFA_STATUS_OK;
}

static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}

/* BFA ioc enable reply by firmware */
static void
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
		     u8 cap_bm)
{
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	ioc->port_mode = ioc->port_mode_cfg = port_mode;
	ioc->ad_cap_bm = cap_bm;
	bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
}

/* Update BFA configuration from firmware configuration. */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	attr->adapter_prop = ntohl(attr->adapter_prop);
	attr->card_type = ntohl(attr->card_type);
	attr->maxfrsize = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
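
/*
 * SMEM is reached through a sliding window: host_page_num_fn selects
 * the page and smem_page_start maps that page into the PCI BAR. The
 * download loop above therefore advances like this (page size per the
 * PSS_SMEM_PGNUM()/PSS_SMEM_PGOFF() macros in bfi_reg.h):
 *
 *	loff = PSS_SMEM_PGOFF(loff);	offset within the current page
 *	if (loff == 0)			crossed a page boundary, so
 *		writel(++pgnum, ...);	slide the window forward
 */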
*/ 2115 static void 2116 bfa_ioc_mbox_attach(struct bfa_ioc *ioc) 2117 { 2118 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2119 int mc; 2120 2121 INIT_LIST_HEAD(&mod->cmd_q); 2122 for (mc = 0; mc < BFI_MC_MAX; mc++) { 2123 mod->mbhdlr[mc].cbfn = NULL; 2124 mod->mbhdlr[mc].cbarg = ioc->bfa; 2125 } 2126 } 2127 2128 /* Mbox poll timer -- restarts any pending mailbox requests. */ 2129 static void 2130 bfa_ioc_mbox_poll(struct bfa_ioc *ioc) 2131 { 2132 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2133 struct bfa_mbox_cmd *cmd; 2134 bfa_mbox_cmd_cbfn_t cbfn; 2135 void *cbarg; 2136 u32 stat; 2137 2138 /** 2139 * If no command pending, do nothing 2140 */ 2141 if (list_empty(&mod->cmd_q)) 2142 return; 2143 2144 /** 2145 * If previous command is not yet fetched by firmware, do nothing 2146 */ 2147 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); 2148 if (stat) 2149 return; 2150 2151 /** 2152 * Enqueue command to firmware. 2153 */ 2154 cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe); 2155 list_del(&cmd->qe); 2156 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 2157 2158 /** 2159 * Give a callback to the client, indicating that the command is sent 2160 */ 2161 if (cmd->cbfn) { 2162 cbfn = cmd->cbfn; 2163 cbarg = cmd->cbarg; 2164 cmd->cbfn = NULL; 2165 cbfn(cbarg); 2166 } 2167 } 2168 2169 /* Cleanup any pending requests. */ 2170 static void 2171 bfa_ioc_mbox_flush(struct bfa_ioc *ioc) 2172 { 2173 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2174 struct bfa_mbox_cmd *cmd; 2175 2176 while (!list_empty(&mod->cmd_q)) { 2177 cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe); 2178 list_del(&cmd->qe); 2179 } 2180 } 2181 2182 /** 2183 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap 2184 * 2185 * @ioc: memory for IOC 2186 * @tbuf: app memory to store data from smem 2187 * @soff: smem offset 2188 * @sz: size of smem in bytes 2189 */ 2190 static int 2191 bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz) 2192 { 2193 u32 pgnum, loff, r32; 2194 int i, len; 2195 u32 *buf = tbuf; 2196 2197 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); 2198 loff = PSS_SMEM_PGOFF(soff); 2199 2200 /* 2201 * Hold semaphore to serialize pll init and fwtrc. 2202 */ 2203 if (!bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) 2204 return 1; 2205 2206 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 2207 2208 len = sz/sizeof(u32); 2209 for (i = 0; i < len; i++) { 2210 r32 = swab32(readl(loff + ioc->ioc_regs.smem_page_start)); 2211 buf[i] = be32_to_cpu(r32); 2212 loff += sizeof(u32); 2213 2214 /** 2215 * handle page offset wrap around 2216 */ 2217 loff = PSS_SMEM_PGOFF(loff); 2218 if (loff == 0) { 2219 pgnum++; 2220 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 2221 } 2222 } 2223 2224 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), 2225 ioc->ioc_regs.host_page_num_fn); 2226 2227 /* 2228 * release semaphore 2229 */ 2230 readl(ioc->ioc_regs.ioc_init_sem_reg); 2231 writel(1, ioc->ioc_regs.ioc_init_sem_reg); 2232 return 0; 2233 } 2234 2235 /* Retrieve saved firmware trace from a prior IOC failure. */ 2236 int 2237 bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen) 2238 { 2239 u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id; 2240 int tlen, status = 0; 2241 2242 tlen = *trclen; 2243 if (tlen > BNA_DBG_FWTRC_LEN) 2244 tlen = BNA_DBG_FWTRC_LEN; 2245 2246 status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen); 2247 *trclen = tlen; 2248 return status; 2249 } 2250 2251 /* Save firmware trace if configured. 
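 *
 * The capture fires at most once per failure (gated by dbg_fwsave_once)
 * and lands in the buffer claimed via bfa_nw_ioc_debug_memclaim().  A
 * consumer would read it back along these lines (dbg_buf is a
 * hypothetical caller buffer):
 *
 *	int len = BNA_DBG_FWTRC_LEN;
 *
 *	bfa_nw_ioc_debug_fwsave(ioc, dbg_buf, &len);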
 */
static void
bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
{
	int tlen;

	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = false;
		if (ioc->dbg_fwsave_len) {
			tlen = ioc->dbg_fwsave_len;
			bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
		}
	}
}

/* Return the firmware trace that was saved on a prior IOC failure. */
int
bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
	int tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}

static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
	/*
	 * Notify the driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
	bfa_nw_ioc_debug_save_ftrc(ioc);
}

/* IOCPF to IOC interface */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}

static void
bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}

static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}

static void
bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
}

static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
{
	/*
	 * Provide the enable completion callback and AEN notification.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}

/* IOC public */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
	/*
	 * Hold the semaphore so that nobody can access the chip during init.
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = true;

	/* Initialize LMEM */
	bfa_ioc_lmem_init(ioc);

	/*
	 * Release the semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}

/* Interface used by the diag module to do a firmware boot with memory test
 * as the entry vector.
 */
static enum bfa_status
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
	     u32 boot_env)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	enum bfa_status status;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return BFA_STATUS_FAILED;
	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_NORMAL) {
		drv_fwhdr = (struct bfi_ioc_image_hdr *)
			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
		/* Boot from flash only if the flash firmware is better
		 * than the driver's firmware; otherwise push the driver's
		 * firmware.
		 */
		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
		    BFI_IOC_IMG_VER_BETTER)
			boot_type = BFI_FWBOOT_TYPE_FLASH;
	}

	/*
	 * Initialize IOC state of all functions on a chip reset.
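	 * Both the current and the alternate function's fwstate are
	 * seeded, so the partner PCI function observes the same
	 * MEMTEST/INITING starting point after the reset.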
2380 */ 2381 if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) { 2382 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST); 2383 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST); 2384 } else { 2385 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING); 2386 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING); 2387 } 2388 2389 bfa_ioc_msgflush(ioc); 2390 status = bfa_ioc_download_fw(ioc, boot_type, boot_env); 2391 if (status == BFA_STATUS_OK) 2392 bfa_ioc_lpu_start(ioc); 2393 else 2394 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); 2395 2396 return status; 2397 } 2398 2399 /* Enable/disable IOC failure auto recovery. */ 2400 void 2401 bfa_nw_ioc_auto_recover(bool auto_recover) 2402 { 2403 bfa_nw_auto_recover = auto_recover; 2404 } 2405 2406 static bool 2407 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg) 2408 { 2409 u32 *msgp = mbmsg; 2410 u32 r32; 2411 int i; 2412 2413 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); 2414 if ((r32 & 1) == 0) 2415 return false; 2416 2417 /** 2418 * read the MBOX msg 2419 */ 2420 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); 2421 i++) { 2422 r32 = readl(ioc->ioc_regs.lpu_mbox + 2423 i * sizeof(u32)); 2424 msgp[i] = htonl(r32); 2425 } 2426 2427 /** 2428 * turn off mailbox interrupt by clearing mailbox status 2429 */ 2430 writel(1, ioc->ioc_regs.lpu_mbox_cmd); 2431 readl(ioc->ioc_regs.lpu_mbox_cmd); 2432 2433 return true; 2434 } 2435 2436 static void 2437 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) 2438 { 2439 union bfi_ioc_i2h_msg_u *msg; 2440 struct bfa_iocpf *iocpf = &ioc->iocpf; 2441 2442 msg = (union bfi_ioc_i2h_msg_u *) m; 2443 2444 bfa_ioc_stats(ioc, ioc_isrs); 2445 2446 switch (msg->mh.msg_id) { 2447 case BFI_IOC_I2H_HBEAT: 2448 break; 2449 2450 case BFI_IOC_I2H_ENABLE_REPLY: 2451 bfa_ioc_enable_reply(ioc, 2452 (enum bfa_mode)msg->fw_event.port_mode, 2453 msg->fw_event.cap_bm); 2454 break; 2455 2456 case BFI_IOC_I2H_DISABLE_REPLY: 2457 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE); 2458 break; 2459 2460 case BFI_IOC_I2H_GETATTR_REPLY: 2461 bfa_ioc_getattr_reply(ioc); 2462 break; 2463 2464 default: 2465 BUG_ON(1); 2466 } 2467 } 2468 2469 /** 2470 * bfa_nw_ioc_attach - IOC attach time initialization and setup. 2471 * 2472 * @ioc: memory for IOC 2473 * @bfa: driver instance structure 2474 */ 2475 void 2476 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) 2477 { 2478 ioc->bfa = bfa; 2479 ioc->cbfn = cbfn; 2480 ioc->fcmode = false; 2481 ioc->pllinit = false; 2482 ioc->dbg_fwsave_once = true; 2483 ioc->iocpf.ioc = ioc; 2484 2485 bfa_ioc_mbox_attach(ioc); 2486 INIT_LIST_HEAD(&ioc->notify_q); 2487 2488 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 2489 bfa_fsm_send_event(ioc, IOC_E_RESET); 2490 } 2491 2492 /* Driver detach time IOC cleanup. */ 2493 void 2494 bfa_nw_ioc_detach(struct bfa_ioc *ioc) 2495 { 2496 bfa_fsm_send_event(ioc, IOC_E_DETACH); 2497 2498 /* Done with detach, empty the notify_q. */ 2499 INIT_LIST_HEAD(&ioc->notify_q); 2500 } 2501 2502 /** 2503 * bfa_nw_ioc_pci_init - Setup IOC PCI properties. 
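 *
 * @ioc: memory for IOC
 * @clscode: class code for this PCI function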
2504 * 2505 * @pcidev: PCI device information for this IOC 2506 */ 2507 void 2508 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, 2509 enum bfi_pcifn_class clscode) 2510 { 2511 ioc->clscode = clscode; 2512 ioc->pcidev = *pcidev; 2513 2514 /** 2515 * Initialize IOC and device personality 2516 */ 2517 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC; 2518 ioc->asic_mode = BFI_ASIC_MODE_FC; 2519 2520 switch (pcidev->device_id) { 2521 case PCI_DEVICE_ID_BROCADE_CT: 2522 ioc->asic_gen = BFI_ASIC_GEN_CT; 2523 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; 2524 ioc->asic_mode = BFI_ASIC_MODE_ETH; 2525 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA; 2526 ioc->ad_cap_bm = BFA_CM_CNA; 2527 break; 2528 2529 case BFA_PCI_DEVICE_ID_CT2: 2530 ioc->asic_gen = BFI_ASIC_GEN_CT2; 2531 if (clscode == BFI_PCIFN_CLASS_FC && 2532 pcidev->ssid == BFA_PCI_CT2_SSID_FC) { 2533 ioc->asic_mode = BFI_ASIC_MODE_FC16; 2534 ioc->fcmode = true; 2535 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; 2536 ioc->ad_cap_bm = BFA_CM_HBA; 2537 } else { 2538 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; 2539 ioc->asic_mode = BFI_ASIC_MODE_ETH; 2540 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) { 2541 ioc->port_mode = 2542 ioc->port_mode_cfg = BFA_MODE_CNA; 2543 ioc->ad_cap_bm = BFA_CM_CNA; 2544 } else { 2545 ioc->port_mode = 2546 ioc->port_mode_cfg = BFA_MODE_NIC; 2547 ioc->ad_cap_bm = BFA_CM_NIC; 2548 } 2549 } 2550 break; 2551 2552 default: 2553 BUG_ON(1); 2554 } 2555 2556 /** 2557 * Set asic specific interfaces. 2558 */ 2559 if (ioc->asic_gen == BFI_ASIC_GEN_CT) 2560 bfa_nw_ioc_set_ct_hwif(ioc); 2561 else { 2562 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2); 2563 bfa_nw_ioc_set_ct2_hwif(ioc); 2564 bfa_nw_ioc_ct2_poweron(ioc); 2565 } 2566 2567 bfa_ioc_map_port(ioc); 2568 bfa_ioc_reg_init(ioc); 2569 } 2570 2571 /** 2572 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory 2573 * 2574 * @dm_kva: kernel virtual address of IOC dma memory 2575 * @dm_pa: physical address of IOC dma memory 2576 */ 2577 void 2578 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) 2579 { 2580 /** 2581 * dma memory for firmware attribute 2582 */ 2583 ioc->attr_dma.kva = dm_kva; 2584 ioc->attr_dma.pa = dm_pa; 2585 ioc->attr = (struct bfi_ioc_attr *) dm_kva; 2586 } 2587 2588 /* Return size of dma memory required. */ 2589 u32 2590 bfa_nw_ioc_meminfo(void) 2591 { 2592 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ); 2593 } 2594 2595 void 2596 bfa_nw_ioc_enable(struct bfa_ioc *ioc) 2597 { 2598 bfa_ioc_stats(ioc, ioc_enables); 2599 ioc->dbg_fwsave_once = true; 2600 2601 bfa_fsm_send_event(ioc, IOC_E_ENABLE); 2602 } 2603 2604 void 2605 bfa_nw_ioc_disable(struct bfa_ioc *ioc) 2606 { 2607 bfa_ioc_stats(ioc, ioc_disables); 2608 bfa_fsm_send_event(ioc, IOC_E_DISABLE); 2609 } 2610 2611 /* Initialize memory for saving firmware trace. */ 2612 void 2613 bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave) 2614 { 2615 ioc->dbg_fwsave = dbg_fwsave; 2616 ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? 
BNA_DBG_FWTRC_LEN : 0; 2617 } 2618 2619 static u32 2620 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr) 2621 { 2622 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr); 2623 } 2624 2625 /* Register mailbox message handler function, to be called by common modules */ 2626 void 2627 bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, 2628 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) 2629 { 2630 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2631 2632 mod->mbhdlr[mc].cbfn = cbfn; 2633 mod->mbhdlr[mc].cbarg = cbarg; 2634 } 2635 2636 /** 2637 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware. 2638 * 2639 * @ioc: IOC instance 2640 * @cmd: Mailbox command 2641 * 2642 * Waits if mailbox is busy. Responsibility of caller to serialize 2643 */ 2644 bool 2645 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd, 2646 bfa_mbox_cmd_cbfn_t cbfn, void *cbarg) 2647 { 2648 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2649 u32 stat; 2650 2651 cmd->cbfn = cbfn; 2652 cmd->cbarg = cbarg; 2653 2654 /** 2655 * If a previous command is pending, queue new command 2656 */ 2657 if (!list_empty(&mod->cmd_q)) { 2658 list_add_tail(&cmd->qe, &mod->cmd_q); 2659 return true; 2660 } 2661 2662 /** 2663 * If mailbox is busy, queue command for poll timer 2664 */ 2665 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); 2666 if (stat) { 2667 list_add_tail(&cmd->qe, &mod->cmd_q); 2668 return true; 2669 } 2670 2671 /** 2672 * mailbox is free -- queue command to firmware 2673 */ 2674 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 2675 2676 return false; 2677 } 2678 2679 /* Handle mailbox interrupts */ 2680 void 2681 bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc) 2682 { 2683 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2684 struct bfi_mbmsg m; 2685 int mc; 2686 2687 if (bfa_ioc_msgget(ioc, &m)) { 2688 /** 2689 * Treat IOC message class as special. 2690 */ 2691 mc = m.mh.msg_class; 2692 if (mc == BFI_MC_IOC) { 2693 bfa_ioc_isr(ioc, &m); 2694 return; 2695 } 2696 2697 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) 2698 return; 2699 2700 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); 2701 } 2702 2703 bfa_ioc_lpu_read_stat(ioc); 2704 2705 /** 2706 * Try to send pending mailbox commands 2707 */ 2708 bfa_ioc_mbox_poll(ioc); 2709 } 2710 2711 void 2712 bfa_nw_ioc_error_isr(struct bfa_ioc *ioc) 2713 { 2714 bfa_ioc_stats(ioc, ioc_hbfails); 2715 bfa_ioc_stats_hb_count(ioc, ioc->hb_count); 2716 bfa_fsm_send_event(ioc, IOC_E_HWERROR); 2717 } 2718 2719 /* return true if IOC is disabled */ 2720 bool 2721 bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc) 2722 { 2723 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) || 2724 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); 2725 } 2726 2727 /* return true if IOC is operational */ 2728 bool 2729 bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) 2730 { 2731 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); 2732 } 2733 2734 /* Add to IOC heartbeat failure notification queue. To be used by common 2735 * modules such as cee, port, diag. 
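 *
 * A minimal registration sketch (callback and storage belong to the
 * caller; my_notify_cbfn and my_cbarg are hypothetical names):
 *
 *	struct bfa_ioc_notify notify;
 *
 *	bfa_ioc_notify_init(&notify, my_notify_cbfn, my_cbarg);
 *	bfa_nw_ioc_notify_register(ioc, &notify);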
2736 */ 2737 void 2738 bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, 2739 struct bfa_ioc_notify *notify) 2740 { 2741 list_add_tail(¬ify->qe, &ioc->notify_q); 2742 } 2743 2744 #define BFA_MFG_NAME "QLogic" 2745 static void 2746 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc, 2747 struct bfa_adapter_attr *ad_attr) 2748 { 2749 struct bfi_ioc_attr *ioc_attr; 2750 2751 ioc_attr = ioc->attr; 2752 2753 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); 2754 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); 2755 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); 2756 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); 2757 memcpy(&ad_attr->vpd, &ioc_attr->vpd, 2758 sizeof(struct bfa_mfg_vpd)); 2759 2760 ad_attr->nports = bfa_ioc_get_nports(ioc); 2761 ad_attr->max_speed = bfa_ioc_speed_sup(ioc); 2762 2763 bfa_ioc_get_adapter_model(ioc, ad_attr->model); 2764 /* For now, model descr uses same model string */ 2765 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); 2766 2767 ad_attr->card_type = ioc_attr->card_type; 2768 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type); 2769 2770 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) 2771 ad_attr->prototype = 1; 2772 else 2773 ad_attr->prototype = 0; 2774 2775 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); 2776 bfa_nw_ioc_get_mac(ioc, ad_attr->mac); 2777 2778 ad_attr->pcie_gen = ioc_attr->pcie_gen; 2779 ad_attr->pcie_lanes = ioc_attr->pcie_lanes; 2780 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; 2781 ad_attr->asic_rev = ioc_attr->asic_rev; 2782 2783 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); 2784 } 2785 2786 static enum bfa_ioc_type 2787 bfa_ioc_get_type(struct bfa_ioc *ioc) 2788 { 2789 if (ioc->clscode == BFI_PCIFN_CLASS_ETH) 2790 return BFA_IOC_TYPE_LL; 2791 2792 BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC)); 2793 2794 return (ioc->attr->port_mode == BFI_PORT_MODE_FC) 2795 ? 
BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE; 2796 } 2797 2798 static void 2799 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num) 2800 { 2801 memcpy(serial_num, 2802 (void *)ioc->attr->brcd_serialnum, 2803 BFA_ADAPTER_SERIAL_NUM_LEN); 2804 } 2805 2806 static void 2807 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver) 2808 { 2809 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); 2810 } 2811 2812 static void 2813 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev) 2814 { 2815 BUG_ON(!(chip_rev)); 2816 2817 memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN); 2818 2819 chip_rev[0] = 'R'; 2820 chip_rev[1] = 'e'; 2821 chip_rev[2] = 'v'; 2822 chip_rev[3] = '-'; 2823 chip_rev[4] = ioc->attr->asic_rev; 2824 chip_rev[5] = '\0'; 2825 } 2826 2827 static void 2828 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver) 2829 { 2830 memcpy(optrom_ver, ioc->attr->optrom_version, 2831 BFA_VERSION_LEN); 2832 } 2833 2834 static void 2835 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) 2836 { 2837 strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); 2838 } 2839 2840 static void 2841 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model) 2842 { 2843 struct bfi_ioc_attr *ioc_attr; 2844 2845 BUG_ON(!(model)); 2846 memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN); 2847 2848 ioc_attr = ioc->attr; 2849 2850 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", 2851 BFA_MFG_NAME, ioc_attr->card_type); 2852 } 2853 2854 static enum bfa_ioc_state 2855 bfa_ioc_get_state(struct bfa_ioc *ioc) 2856 { 2857 enum bfa_iocpf_state iocpf_st; 2858 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm); 2859 2860 if (ioc_st == BFA_IOC_ENABLING || 2861 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) { 2862 2863 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); 2864 2865 switch (iocpf_st) { 2866 case BFA_IOCPF_SEMWAIT: 2867 ioc_st = BFA_IOC_SEMWAIT; 2868 break; 2869 2870 case BFA_IOCPF_HWINIT: 2871 ioc_st = BFA_IOC_HWINIT; 2872 break; 2873 2874 case BFA_IOCPF_FWMISMATCH: 2875 ioc_st = BFA_IOC_FWMISMATCH; 2876 break; 2877 2878 case BFA_IOCPF_FAIL: 2879 ioc_st = BFA_IOC_FAIL; 2880 break; 2881 2882 case BFA_IOCPF_INITFAIL: 2883 ioc_st = BFA_IOC_INITFAIL; 2884 break; 2885 2886 default: 2887 break; 2888 } 2889 } 2890 return ioc_st; 2891 } 2892 2893 void 2894 bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr) 2895 { 2896 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr)); 2897 2898 ioc_attr->state = bfa_ioc_get_state(ioc); 2899 ioc_attr->port_id = bfa_ioc_portid(ioc); 2900 ioc_attr->port_mode = ioc->port_mode; 2901 2902 ioc_attr->port_mode_cfg = ioc->port_mode_cfg; 2903 ioc_attr->cap_bm = ioc->ad_cap_bm; 2904 2905 ioc_attr->ioc_type = bfa_ioc_get_type(ioc); 2906 2907 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); 2908 2909 ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc); 2910 ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc); 2911 ioc_attr->def_fn = bfa_ioc_is_default(ioc); 2912 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); 2913 } 2914 2915 /* WWN public */ 2916 static u64 2917 bfa_ioc_get_pwwn(struct bfa_ioc *ioc) 2918 { 2919 return ioc->attr->pwwn; 2920 } 2921 2922 void 2923 bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac) 2924 { 2925 ether_addr_copy(mac, ioc->attr->mac); 2926 } 2927 2928 /* Firmware failure detected. Start recovery actions. 
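 *
 * Reached from bfa_nw_ioc_hb_check() when the heartbeat counter stops
 * advancing within BFA_IOC_HB_TOV; raising IOC_E_HBFAIL lets the IOC
 * state machine retry or latch the failure, depending on the
 * auto-recovery setting.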
 */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	pr_crit("Heart Beat of IOC has failed\n");
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

/* BFA IOC PF private functions */

static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}

static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}

static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}

static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}

static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}

void
bfa_nw_iocpf_timeout(struct bfa_ioc *ioc)
{
	enum bfa_iocpf_state iocpf_st;

	iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

	if (iocpf_st == BFA_IOCPF_HWINIT)
		bfa_ioc_poll_fwinit(ioc);
	else
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

void
bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
	} else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		mod_timer(&ioc->iocpf_timer, jiffies +
			msecs_to_jiffies(BFA_IOC_POLL_TOV));
	}
}

/*
 * Flash module specific
 */

/*
 * The flash DMA buffer must be large enough to hold both the MFG block
 * and the ASIC block (64k) at the same time, and must be 2k aligned so
 * that a write segment cannot cross a sector boundary.
 */
#define BFA_FLASH_SEG_SZ	2048
#define BFA_FLASH_DMA_BUF_SZ	\
	roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)

static void
bfa_flash_cb(struct bfa_flash *flash)
{
	flash->op_busy = 0;
	if (flash->cbfn)
		flash->cbfn(flash->cbarg, flash->status);
}

static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_flash *flash = cbarg;

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (flash->op_busy) {
			flash->status = BFA_STATUS_IOC_FAILURE;
			flash->cbfn(flash->cbarg, flash->status);
			flash->op_busy = 0;
		}
		break;
	default:
		break;
	}
}

/*
 * Send flash write request.
 */
static void
bfa_flash_write_send(struct bfa_flash *flash)
{
	struct bfi_flash_write_req *msg =
			(struct bfi_flash_write_req *) flash->mb.msg;
	u32 len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
3068 flash->residue : BFA_FLASH_DMA_BUF_SZ; 3069 msg->length = be32_to_cpu(len); 3070 3071 /* indicate if it's the last msg of the whole write operation */ 3072 msg->last = (len == flash->residue) ? 1 : 0; 3073 3074 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ, 3075 bfa_ioc_portid(flash->ioc)); 3076 bfa_alen_set(&msg->alen, len, flash->dbuf_pa); 3077 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len); 3078 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); 3079 3080 flash->residue -= len; 3081 flash->offset += len; 3082 } 3083 3084 /** 3085 * bfa_flash_read_send - Send flash read request. 3086 * 3087 * @cbarg: callback argument 3088 */ 3089 static void 3090 bfa_flash_read_send(void *cbarg) 3091 { 3092 struct bfa_flash *flash = cbarg; 3093 struct bfi_flash_read_req *msg = 3094 (struct bfi_flash_read_req *) flash->mb.msg; 3095 u32 len; 3096 3097 msg->type = be32_to_cpu(flash->type); 3098 msg->instance = flash->instance; 3099 msg->offset = be32_to_cpu(flash->addr_off + flash->offset); 3100 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? 3101 flash->residue : BFA_FLASH_DMA_BUF_SZ; 3102 msg->length = be32_to_cpu(len); 3103 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ, 3104 bfa_ioc_portid(flash->ioc)); 3105 bfa_alen_set(&msg->alen, len, flash->dbuf_pa); 3106 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); 3107 } 3108 3109 /** 3110 * bfa_flash_intr - Process flash response messages upon receiving interrupts. 3111 * 3112 * @flasharg: flash structure 3113 * @msg: message structure 3114 */ 3115 static void 3116 bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg) 3117 { 3118 struct bfa_flash *flash = flasharg; 3119 u32 status; 3120 3121 union { 3122 struct bfi_flash_query_rsp *query; 3123 struct bfi_flash_write_rsp *write; 3124 struct bfi_flash_read_rsp *read; 3125 struct bfi_mbmsg *msg; 3126 } m; 3127 3128 m.msg = msg; 3129 3130 /* receiving response after ioc failure */ 3131 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) 3132 return; 3133 3134 switch (msg->mh.msg_id) { 3135 case BFI_FLASH_I2H_QUERY_RSP: 3136 status = be32_to_cpu(m.query->status); 3137 if (status == BFA_STATUS_OK) { 3138 u32 i; 3139 struct bfa_flash_attr *attr, *f; 3140 3141 attr = (struct bfa_flash_attr *) flash->ubuf; 3142 f = (struct bfa_flash_attr *) flash->dbuf_kva; 3143 attr->status = be32_to_cpu(f->status); 3144 attr->npart = be32_to_cpu(f->npart); 3145 for (i = 0; i < attr->npart; i++) { 3146 attr->part[i].part_type = 3147 be32_to_cpu(f->part[i].part_type); 3148 attr->part[i].part_instance = 3149 be32_to_cpu(f->part[i].part_instance); 3150 attr->part[i].part_off = 3151 be32_to_cpu(f->part[i].part_off); 3152 attr->part[i].part_size = 3153 be32_to_cpu(f->part[i].part_size); 3154 attr->part[i].part_len = 3155 be32_to_cpu(f->part[i].part_len); 3156 attr->part[i].part_status = 3157 be32_to_cpu(f->part[i].part_status); 3158 } 3159 } 3160 flash->status = status; 3161 bfa_flash_cb(flash); 3162 break; 3163 case BFI_FLASH_I2H_WRITE_RSP: 3164 status = be32_to_cpu(m.write->status); 3165 if (status != BFA_STATUS_OK || flash->residue == 0) { 3166 flash->status = status; 3167 bfa_flash_cb(flash); 3168 } else 3169 bfa_flash_write_send(flash); 3170 break; 3171 case BFI_FLASH_I2H_READ_RSP: 3172 status = be32_to_cpu(m.read->status); 3173 if (status != BFA_STATUS_OK) { 3174 flash->status = status; 3175 bfa_flash_cb(flash); 3176 } else { 3177 u32 len = be32_to_cpu(m.read->length); 3178 memcpy(flash->ubuf + flash->offset, 3179 flash->dbuf_kva, len); 3180 flash->residue -= len; 
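			/* advance the copy window before requesting the next chunk */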
3181 flash->offset += len; 3182 if (flash->residue == 0) { 3183 flash->status = status; 3184 bfa_flash_cb(flash); 3185 } else 3186 bfa_flash_read_send(flash); 3187 } 3188 break; 3189 case BFI_FLASH_I2H_BOOT_VER_RSP: 3190 case BFI_FLASH_I2H_EVENT: 3191 break; 3192 default: 3193 WARN_ON(1); 3194 } 3195 } 3196 3197 /* 3198 * Flash memory info API. 3199 */ 3200 u32 3201 bfa_nw_flash_meminfo(void) 3202 { 3203 return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 3204 } 3205 3206 /** 3207 * bfa_nw_flash_attach - Flash attach API. 3208 * 3209 * @flash: flash structure 3210 * @ioc: ioc structure 3211 * @dev: device structure 3212 */ 3213 void 3214 bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev) 3215 { 3216 flash->ioc = ioc; 3217 flash->cbfn = NULL; 3218 flash->cbarg = NULL; 3219 flash->op_busy = 0; 3220 3221 bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash); 3222 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash); 3223 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); 3224 } 3225 3226 /** 3227 * bfa_nw_flash_memclaim - Claim memory for flash 3228 * 3229 * @flash: flash structure 3230 * @dm_kva: pointer to virtual memory address 3231 * @dm_pa: physical memory address 3232 */ 3233 void 3234 bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa) 3235 { 3236 flash->dbuf_kva = dm_kva; 3237 flash->dbuf_pa = dm_pa; 3238 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ); 3239 dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 3240 dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 3241 } 3242 3243 /** 3244 * bfa_nw_flash_get_attr - Get flash attribute. 3245 * 3246 * @flash: flash structure 3247 * @attr: flash attribute structure 3248 * @cbfn: callback function 3249 * @cbarg: callback argument 3250 * 3251 * Return status. 3252 */ 3253 enum bfa_status 3254 bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr, 3255 bfa_cb_flash cbfn, void *cbarg) 3256 { 3257 struct bfi_flash_query_req *msg = 3258 (struct bfi_flash_query_req *) flash->mb.msg; 3259 3260 if (!bfa_nw_ioc_is_operational(flash->ioc)) 3261 return BFA_STATUS_IOC_NON_OP; 3262 3263 if (flash->op_busy) 3264 return BFA_STATUS_DEVBUSY; 3265 3266 flash->op_busy = 1; 3267 flash->cbfn = cbfn; 3268 flash->cbarg = cbarg; 3269 flash->ubuf = (u8 *) attr; 3270 3271 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ, 3272 bfa_ioc_portid(flash->ioc)); 3273 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa); 3274 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); 3275 3276 return BFA_STATUS_OK; 3277 } 3278 3279 /** 3280 * bfa_nw_flash_update_part - Update flash partition. 3281 * 3282 * @flash: flash structure 3283 * @type: flash partition type 3284 * @instance: flash partition instance 3285 * @buf: update data buffer 3286 * @len: data buffer length 3287 * @offset: offset relative to the partition starting address 3288 * @cbfn: callback function 3289 * @cbarg: callback argument 3290 * 3291 * Return status. 
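 *
 * A minimal asynchronous-update sketch (img_buf, img_len and the
 * completion callback are hypothetical; len must be a multiple of 4 and
 * part_type any writable partition other than BFA_FLASH_PART_MFG):
 *
 *	bfa_nw_flash_update_part(flash, part_type, 0, img_buf, img_len,
 *				 0, my_done_cbfn, my_arg);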
3292 */ 3293 enum bfa_status 3294 bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance, 3295 void *buf, u32 len, u32 offset, 3296 bfa_cb_flash cbfn, void *cbarg) 3297 { 3298 if (!bfa_nw_ioc_is_operational(flash->ioc)) 3299 return BFA_STATUS_IOC_NON_OP; 3300 3301 /* 3302 * 'len' must be in word (4-byte) boundary 3303 */ 3304 if (!len || (len & 0x03)) 3305 return BFA_STATUS_FLASH_BAD_LEN; 3306 3307 if (type == BFA_FLASH_PART_MFG) 3308 return BFA_STATUS_EINVAL; 3309 3310 if (flash->op_busy) 3311 return BFA_STATUS_DEVBUSY; 3312 3313 flash->op_busy = 1; 3314 flash->cbfn = cbfn; 3315 flash->cbarg = cbarg; 3316 flash->type = type; 3317 flash->instance = instance; 3318 flash->residue = len; 3319 flash->offset = 0; 3320 flash->addr_off = offset; 3321 flash->ubuf = buf; 3322 3323 bfa_flash_write_send(flash); 3324 3325 return BFA_STATUS_OK; 3326 } 3327 3328 /** 3329 * bfa_nw_flash_read_part - Read flash partition. 3330 * 3331 * @flash: flash structure 3332 * @type: flash partition type 3333 * @instance: flash partition instance 3334 * @buf: read data buffer 3335 * @len: data buffer length 3336 * @offset: offset relative to the partition starting address 3337 * @cbfn: callback function 3338 * @cbarg: callback argument 3339 * 3340 * Return status. 3341 */ 3342 enum bfa_status 3343 bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance, 3344 void *buf, u32 len, u32 offset, 3345 bfa_cb_flash cbfn, void *cbarg) 3346 { 3347 if (!bfa_nw_ioc_is_operational(flash->ioc)) 3348 return BFA_STATUS_IOC_NON_OP; 3349 3350 /* 3351 * 'len' must be in word (4-byte) boundary 3352 */ 3353 if (!len || (len & 0x03)) 3354 return BFA_STATUS_FLASH_BAD_LEN; 3355 3356 if (flash->op_busy) 3357 return BFA_STATUS_DEVBUSY; 3358 3359 flash->op_busy = 1; 3360 flash->cbfn = cbfn; 3361 flash->cbarg = cbarg; 3362 flash->type = type; 3363 flash->instance = instance; 3364 flash->residue = len; 3365 flash->offset = 0; 3366 flash->addr_off = offset; 3367 flash->ubuf = buf; 3368 3369 bfa_flash_read_send(flash); 3370 3371 return BFA_STATUS_OK; 3372 } 3373