/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

/* IOC local definitions */

/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */

#define bfa_ioc_firmware_lock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
		((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
		((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)	\
		((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
		((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)	\
		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))

static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
			enum bfi_fwboot_type boot_type, u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
						char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
						char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
						char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
						char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
						char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
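/*
 * Editorial overview (inferred from the code below, not part of the
 * original driver): IOC bring-up is driven by two cooperating state
 * machines.  The IOC state machine owns the driver-visible life cycle
 * (reset -> enabling -> getattr -> operational), while the IOCPF state
 * machine underneath it owns the h/w semaphore, the firmware version
 * check/download, and the enable/disable handshake with firmware.
 */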
/* IOC state machine definitions/declarations */
enum ioc_event {
	IOC_E_RESET		= 1,	/*!< IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*!< IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*!< IOC disable request	*/
	IOC_E_DETACH		= 4,	/*!< driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*!< failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*!< heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*!< hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*!< timeout			*/
	IOC_E_HWFAILED		= 12,	/*!< PCI mapping failure notice	*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);

static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
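/*
 * Editorial note: each bfa_sm_table entry pairs a state-handler function
 * with its externally visible bfa_ioc_state value, so the current handler
 * can be translated back into an enum for queries, e.g. (illustrative):
 *
 *	enum bfa_ioc_state st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
 */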
/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);

/* IOCPF state machine events */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*!< IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*!< IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*!< stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*!< f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*!< enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*!< disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*!< failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*!< h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*!< f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*!< h/w sem mapping error	*/
};

/* IOCPF states */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*!< IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/*!< Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*!< IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*!< IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/*!< IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/*!< IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/*!< IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/*!< IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/*!< IOC f/w different from driver's */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);

static struct bfa_sm_table iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/* IOC State Machine */

/* Beginning state. IOC uninit state. */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}

/* IOC is in uninit state. */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Reset entry actions -- initialize state machine */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/* IOC is in reset state. */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_enable(ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}
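/*
 * Editorial note on the fall-through pattern above: IOC_E_PFFAILED means
 * the IOCPF state machine has already handled the failure, so only a raw
 * IOC_E_HWERROR needs to be forwarded down via bfa_iocpf_initfail().
 */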
/* Semaphore should be acquired for version check. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	mod_timer(&ioc->ioc_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_getattr(ioc);
}

/* IOC configuration in progress. Timer is active. */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		del_timer(&ioc->ioc_timer);
		/* fall through */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_getattrfail(ioc);
		break;

	case IOC_E_DISABLE:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
}

static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_iocpf_fail(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
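/*
 * Editorial note: whether a failure in the operational state retries
 * (fail_retry) or sticks (fail) depends on iocpf.auto_recover, which is
 * seeded from the module-wide bfa_nw_auto_recover flag on reset entry.
 */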
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_disable(ioc);
}

/* IOC is being disabled */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_iocpf_fail(ioc);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOC disable completion entry. */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}

/* Hardware initialization retry. */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/**
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}

/* IOC failure. */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_HWERROR:
		/* HB failure notification, ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}

/* IOC failure. */
static void
bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOCPF State Machine */

/* Reset entry actions -- initialize state machine */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
	iocpf->fw_mismatch_notified = false;
	iocpf->auto_recover = bfa_nw_auto_recover;
}

/* Beginning state. IOC is in reset state. */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Semaphore should be acquired for version check. */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_init(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/* Awaiting h/w semaphore to continue with version check. */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				bfa_nw_ioc_hw_sem_release(ioc);
				mod_timer(&ioc->sem_timer, jiffies +
					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
			}
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Notify enable completion callback */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
	/* Call only the first time sm enters fwmismatch state. */
	if (!iocpf->fw_mismatch_notified)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = true;
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
}

/* Awaiting firmware version match. */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Request for semaphore. */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Awaiting semaphore for h/w initialization. */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			mod_timer(&ioc->sem_timer, jiffies +
				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}
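/*
 * Editorial note: fwcheck and semwait both re-arm sem_timer for
 * BFA_IOC_HWSEM_TOV and retry while the semaphore or sync state is held
 * elsewhere; IOCPF_E_SEM_ERROR (raised when the semaphore register reads
 * back as ~0, i.e. the PCI mapping is gone) aborts straight to the fail
 * state instead of retrying.
 */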
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_reset(iocpf->ioc, false);
}

/* Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	/**
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		del_timer(&ioc->iocpf_timer);
		/* fall through */

	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_pf_enabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}
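/*
 * Editorial note: the h/w semaphore stays held across hwinit and the
 * BFI_IOC_H2I_ENABLE_REQ exchange above; it is released only once
 * firmware answers (IOCPF_E_FWRSP_ENABLE) or the attempt is abandoned
 * on timeout, init failure or disable.
 */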
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_disable(iocpf->ioc);
}

/* IOC is being disabled */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		del_timer(&ioc->iocpf_timer);
		/* fall through */

	case IOCPF_E_TIMEOUT:
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* IOC hb ack request is being removed. */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOC disable completion entry. */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_pf_disabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				bfa_nw_ioc_hw_sem_release(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}

/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* BFA IOC private functions */

/* Notify common modules registered for notification. */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
	struct bfa_ioc_notify *notify;

	list_for_each_entry(notify, &ioc->notify_q, qe)
		notify->cbfn(notify->cbarg, event);
}

static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return true;

	return false;
}

void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	readl(sem_reg);
	writel(1, sem_reg);
}
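/*
 * Editorial note: reading the semaphore register both samples and claims
 * it -- the first reader sees bit 0 clear and thereby owns the lock, and
 * writing 1 releases it.  bfa_nw_ioc_sem_get() above therefore spins for
 * at most 3000 * 2 us = ~6 ms before reporting failure.
 */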
/* Clear fwver hdr */
static void
bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
{
	u32 pgnum, pgoff, loff = 0;
	int i;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
		writel(0, ioc->ioc_regs.smem_page_start + loff);
		loff += sizeof(u32);
	}
}

static void
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr;
	u32 fwstate, r32;

	/* Spin on init semaphore to serialize. */
	r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	}

	fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_ioc_fwver_clear(ioc);
	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);

	/* Unlock init semaphore */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32 r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}

void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}

/* Initialize LPU local memory (aka secondary memory / SRAM) */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/* Get driver and firmware versions. */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32 pgnum;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl(loff + ioc->ioc_regs.smem_page_start));
		loff += sizeof(u32);
	}
}

static bool
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
			struct bfi_ioc_image_hdr *fwhdr_2)
{
	int i;

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
			return false;
	}

	return true;
}

/* Returns TRUE if the major, minor and maintenance versions match.
 * If the patch versions also match, the MD5 checksums must match as well.
 */
static bool
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
			struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
		return false;
	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
		return false;
	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
		return false;
	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
		return false;
	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
	    drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
	    drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);

	return true;
}

static bool
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
{
	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
		return false;

	return true;
}

static bool
fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
{
	if (fwhdr->fwver.phase == 0 &&
	    fwhdr->fwver.build == 0)
		return false;

	return true;
}

/* Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better. */
static enum bfi_ioc_img_ver_cmp
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
			 struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
	if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp))
		return BFI_IOC_IMG_VER_INCOMP;

	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_OLD;

	/* GA takes priority over internal builds of the same patch stream.
	 * At this point major minor maint and patch numbers are same.
	 */
	if (fwhdr_is_ga(base_fwhdr))
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_SAME;
		else
			return BFI_IOC_IMG_VER_OLD;
	else
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_BETTER;

	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_OLD;

	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_OLD;

	/* All version numbers are equal.
	 * The MD5 check was already done as part of the compatibility check.
	 */
	return BFI_IOC_IMG_VER_SAME;
}
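/*
 * Worked example (editorial, hypothetical version numbers): comparing a
 * base header 3.2.3.0 against a candidate 3.2.3.1 first passes the
 * compatibility check (major/minor/maint equal), then returns
 * BFI_IOC_IMG_VER_BETTER on the patch field alone, before the
 * GA/phase/build tie-breakers are consulted.
 */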
/* register definitions */
#define FLI_CMD_REG		0x0001d000
#define FLI_WRDATA_REG		0x0001d00c
#define FLI_RDDATA_REG		0x0001d010
#define FLI_ADDR_REG		0x0001d004
#define FLI_DEV_STATUS_REG	0x0001d014

#define BFA_FLASH_FIFO_SIZE	128	/* fifo size */
#define BFA_FLASH_CHECK_MAX	10000	/* max # of status check */
#define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */
#define BFA_FLASH_WIP_MASK	0x01	/* write in progress bit mask */

#define NFC_STATE_RUNNING	0x20000001
#define NFC_STATE_PAUSED	0x00004560
#define NFC_VER_VALID		0x147

enum bfa_flash_cmd {
	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
	BFA_FLASH_WRITE_ENABLE	= 0x06,	/* write enable */
	BFA_FLASH_SECTOR_ERASE	= 0xd8,	/* sector erase */
	BFA_FLASH_WRITE		= 0x02,	/* write */
	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
};

/* hardware error definition */
enum bfa_flash_err {
	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */
	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */
	BFA_FLASH_BAD		= -3,	/*!< flash bad */
	BFA_FLASH_BUSY		= -4,	/*!< flash busy */
	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */
	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */
	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */
	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */
	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */
};

/* flash command register data structure */
union bfa_flash_cmd_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	act:1;
		u32	rsv:1;
		u32	write_cnt:9;
		u32	read_cnt:9;
		u32	addr_cnt:4;
		u32	cmd:8;
#else
		u32	cmd:8;
		u32	addr_cnt:4;
		u32	read_cnt:9;
		u32	write_cnt:9;
		u32	rsv:1;
		u32	act:1;
#endif
	} r;
	u32	i;
};

/* flash device status register data structure */
union bfa_flash_dev_status_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	rsv:21;
		u32	fifo_cnt:6;
		u32	busy:1;
		u32	init_status:1;
		u32	present:1;
		u32	bad:1;
		u32	good:1;
#else
		u32	good:1;
		u32	bad:1;
		u32	present:1;
		u32	init_status:1;
		u32	busy:1;
		u32	fifo_cnt:6;
		u32	rsv:21;
#endif
	} r;
	u32	i;
};

/* flash address register data structure */
union bfa_flash_addr_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	addr:24;
		u32	dummy:8;
#else
		u32	dummy:8;
		u32	addr:24;
#endif
	} r;
	u32	i;
};

/* Flash raw private functions */
static void
bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
		  u8 rd_cnt, u8 ad_cnt, u8 op)
{
	union bfa_flash_cmd_reg cmd;

	cmd.i = 0;
	cmd.r.act = 1;
	cmd.r.write_cnt = wr_cnt;
	cmd.r.read_cnt = rd_cnt;
	cmd.r.addr_cnt = ad_cnt;
	cmd.r.cmd = op;
	writel(cmd.i, (pci_bar + FLI_CMD_REG));
}

static void
bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
{
	union bfa_flash_addr_reg addr;

	addr.r.addr = address & 0x00ffffff;
	addr.r.dummy = 0;
	writel(addr.i, (pci_bar + FLI_ADDR_REG));
}

static int
bfa_flash_cmd_act_check(void __iomem *pci_bar)
{
	union bfa_flash_cmd_reg cmd;

	cmd.i = readl(pci_bar + FLI_CMD_REG);

	if (cmd.r.act)
		return BFA_FLASH_ERR_CMD_ACT;

	return 0;
}
/* Flush FLI data fifo. */
static int
bfa_flash_fifo_flush(void __iomem *pci_bar)
{
	u32 i;
	u32 t;
	union bfa_flash_dev_status_reg dev_status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);

	if (!dev_status.r.fifo_cnt)
		return 0;

	/* fifo counter in terms of words */
	for (i = 0; i < dev_status.r.fifo_cnt; i++)
		t = readl(pci_bar + FLI_RDDATA_REG);

	/* Check the device status. It may take some time. */
	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
		if (!dev_status.r.fifo_cnt)
			break;
	}

	if (dev_status.r.fifo_cnt)
		return BFA_FLASH_ERR_FIFO_CNT;

	return 0;
}

/* Read flash status. */
static int
bfa_flash_status_read(void __iomem *pci_bar)
{
	union bfa_flash_dev_status_reg dev_status;
	int status;
	u32 ret_status;
	int i;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);

	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		status = bfa_flash_cmd_act_check(pci_bar);
		if (!status)
			break;
	}

	if (status)
		return status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
	if (!dev_status.r.fifo_cnt)
		return BFA_FLASH_BUSY;

	ret_status = readl(pci_bar + FLI_RDDATA_REG);
	ret_status >>= 24;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	return ret_status;
}

/* Start flash read operation. */
static int
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
		     char *buf)
{
	int status;

	/* len must be a multiple of 4 and must not exceed the fifo size */
	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
		return BFA_FLASH_ERR_LEN;

	/* check status */
	status = bfa_flash_status_read(pci_bar);
	if (status == BFA_FLASH_BUSY)
		status = bfa_flash_status_read(pci_bar);

	if (status < 0)
		return status;

	/* check if write-in-progress bit is cleared */
	if (status & BFA_FLASH_WIP_MASK)
		return BFA_FLASH_ERR_WIP;

	bfa_flash_set_addr(pci_bar, offset);

	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);

	return 0;
}

/* Check flash read operation. */
static u32
bfa_flash_read_check(void __iomem *pci_bar)
{
	if (bfa_flash_cmd_act_check(pci_bar))
		return 1;

	return 0;
}

/* End flash read operation. */
static void
bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
{
	u32 i;

	/* read data fifo up to 32 words */
	for (i = 0; i < len; i += 4) {
		u32 w = readl(pci_bar + FLI_RDDATA_REG);
		*((u32 *)(buf + i)) = swab32(w);
	}

	bfa_flash_fifo_flush(pci_bar);
}
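/*
 * Editorial summary of one raw read cycle, as implemented above:
 *
 *	bfa_flash_status_read()	-- drain fifo, fetch the WIP status byte
 *	bfa_flash_set_addr()	-- latch the 24-bit flash offset
 *	bfa_flash_set_cmd()	-- issue FAST_READ for <= 128 bytes
 *	bfa_flash_read_check()	-- poll until the cmd.act bit clears
 *	bfa_flash_read_end()	-- copy fifo words out, byte-swapped
 */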
/* Perform flash raw read. */

#define FLASH_BLOCKING_OP_MAX	500
#define FLASH_SEM_LOCK_REG	0x18820

static int
bfa_raw_sem_get(void __iomem *bar)
{
	int	locked;

	locked = readl(bar + FLASH_SEM_LOCK_REG);

	return !locked;
}

static enum bfa_status
bfa_flash_sem_get(void __iomem *bar)
{
	u32 n = FLASH_BLOCKING_OP_MAX;

	while (!bfa_raw_sem_get(bar)) {
		if (--n <= 0)
			return BFA_STATUS_BADFLASH;
		mdelay(10);
	}
	return BFA_STATUS_OK;
}

static void
bfa_flash_sem_put(void __iomem *bar)
{
	writel(0, (bar + FLASH_SEM_LOCK_REG));
}

static enum bfa_status
bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
		   u32 len)
{
	u32 n;
	int status;
	u32 off, l, s, residue, fifo_sz;

	residue = len;
	off = 0;
	fifo_sz = BFA_FLASH_FIFO_SIZE;
	status = bfa_flash_sem_get(pci_bar);
	if (status != BFA_STATUS_OK)
		return status;

	while (residue) {
		s = offset + off;
		n = s / fifo_sz;
		l = (n + 1) * fifo_sz - s;
		if (l > residue)
			l = residue;

		status = bfa_flash_read_start(pci_bar, offset + off, l,
								&buf[off]);
		if (status < 0) {
			bfa_flash_sem_put(pci_bar);
			return BFA_STATUS_FAILED;
		}

		n = BFA_FLASH_BLOCKING_OP_MAX;
		while (bfa_flash_read_check(pci_bar)) {
			if (--n <= 0) {
				bfa_flash_sem_put(pci_bar);
				return BFA_STATUS_FAILED;
			}
		}

		bfa_flash_read_end(pci_bar, l, &buf[off]);

		residue -= l;
		off += l;
	}
	bfa_flash_sem_put(pci_bar);

	return BFA_STATUS_OK;
}
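/*
 * Editorial note on bfa_flash_raw_read() above: l = (n + 1) * fifo_sz - s
 * clamps each read so it never crosses a 128-byte fifo boundary; e.g. a
 * read starting at flash offset 100 first fetches 28 bytes, then proceeds
 * in full fifo-sized chunks.
 */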
#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */

static enum bfa_status
bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
			      u32 *fwimg)
{
	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
}

static enum bfi_ioc_img_ver_cmp
bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
			struct bfi_ioc_image_hdr *base_fwhdr)
{
	struct bfi_ioc_image_hdr *flash_fwhdr;
	enum bfa_status status;
	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];

	status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
	if (status != BFA_STATUS_OK)
		return BFI_IOC_IMG_VER_INCOMP;

	flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
	if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
	else
		return BFI_IOC_IMG_VER_INCOMP;
}

/**
 * Returns TRUE if driver is willing to work with current smem f/w version.
 */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;

	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	/* If smem is incompatible or old, driver should not work with it. */
	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
	    drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
		return false;
	}

	/* If flash has a better f/w than smem, do not work with smem.
	 * If smem f/w == flash f/w, work with it, since smem f/w is
	 * neither old nor incompatible at this point.
	 * If flash is old or incompatible, work with smem iff
	 * smem f/w == drv f/w.
	 */
	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);

	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
		return false;
	else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
		return true;
	else
		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
			true : false;
}

/* Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr fwhdr;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (swab32(fwhdr.bootenv) != boot_env)
		return false;

	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}

/* Conditionally flush any pending message from firmware at start. */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;
	u32 boot_env;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	boot_env = BFI_FWBOOT_ENV_OS;

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
		    BFA_STATUS_OK)
			bfa_ioc_poll_fwinit(ioc);

		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
	    BFA_STATUS_OK)
		bfa_ioc_poll_fwinit(ioc);
}
void
bfa_nw_ioc_timeout(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
		       ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
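/*
 * Editorial note: the mailbox protocol above is strictly single-slot --
 * the message, zero-padded to BFI_IOC_MSGLEN_MAX, is written into the
 * h/w mailbox registers, then a write of 1 to hfn_mbox_cmd raises the
 * LPU event; the trailing readl() flushes the posted writes.
 */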
static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = htons(ioc->clscode);
	enable_req.rsvd = htons(0);
	/* overflow in 2106 */
	enable_req.tv_sec = ntohl(ktime_get_real_seconds());
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	disable_req.clscode = htons(ioc->clscode);
	disable_req.rsvd = htons(0);
	/* overflow in 2106 */
	disable_req.tv_sec = ntohl(ktime_get_real_seconds());
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

void
bfa_nw_ioc_hb_check(struct bfa_ioc *ioc)
{
	u32 hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}

/* Initiate a full firmware download. */
static enum bfa_status
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;
	u32 fwimg_size;
	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
	enum bfa_status status;

	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);

		status = bfa_nw_ioc_flash_img_get_chnk(ioc,
			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
		if (status != BFA_STATUS_OK)
			return status;

		fwimg = fwimg_buf;
	} else {
		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
	}

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < fwimg_size; i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			if (boot_env == BFI_FWBOOT_ENV_OS &&
			    boot_type == BFI_FWBOOT_TYPE_FLASH) {
				status = bfa_nw_ioc_flash_img_get_chnk(ioc,
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
					fwimg_buf);
				if (status != BFA_STATUS_OK)
					return status;

				fwimg = fwimg_buf;
			} else {
				fwimg = bfa_cb_image_get_chunk(
					bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
			}
		}

		/**
		 * write smem
		 */
		writel(swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]),
		       ioc->ioc_regs.smem_page_start + loff);

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum,
			       ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(bfa_ioc_smem_pgnum(ioc, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	 */
	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
		boot_type = BFI_FWBOOT_TYPE_NORMAL;
	}
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				      ioc->port0_mode, ioc->port1_mode);
	writel(asicmode, ((ioc->ioc_regs.smem_page_start)
			  + BFI_FWBOOT_DEVMODE_OFF));
	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
			   + (BFI_FWBOOT_TYPE_OFF)));
	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
			  + (BFI_FWBOOT_ENV_OFF)));
	return BFA_STATUS_OK;
}

static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}

/* BFA ioc enable reply by firmware */
static void
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
		     u8 cap_bm)
{
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	ioc->port_mode = ioc->port_mode_cfg = port_mode;
	ioc->ad_cap_bm = cap_bm;
	bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
}

/* Update BFA configuration from firmware configuration. */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	attr->adapter_prop = ntohl(attr->adapter_prop);
	attr->card_type = ntohl(attr->card_type);
	attr->maxfrsize = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
*/ 2123 static void 2124 bfa_ioc_mbox_attach(struct bfa_ioc *ioc) 2125 { 2126 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2127 int mc; 2128 2129 INIT_LIST_HEAD(&mod->cmd_q); 2130 for (mc = 0; mc < BFI_MC_MAX; mc++) { 2131 mod->mbhdlr[mc].cbfn = NULL; 2132 mod->mbhdlr[mc].cbarg = ioc->bfa; 2133 } 2134 } 2135 2136 /* Mbox poll timer -- restarts any pending mailbox requests. */ 2137 static void 2138 bfa_ioc_mbox_poll(struct bfa_ioc *ioc) 2139 { 2140 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2141 struct bfa_mbox_cmd *cmd; 2142 bfa_mbox_cmd_cbfn_t cbfn; 2143 void *cbarg; 2144 u32 stat; 2145 2146 /** 2147 * If no command pending, do nothing 2148 */ 2149 if (list_empty(&mod->cmd_q)) 2150 return; 2151 2152 /** 2153 * If previous command is not yet fetched by firmware, do nothing 2154 */ 2155 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); 2156 if (stat) 2157 return; 2158 2159 /** 2160 * Enqueue command to firmware. 2161 */ 2162 cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe); 2163 list_del(&cmd->qe); 2164 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 2165 2166 /** 2167 * Give a callback to the client, indicating that the command is sent 2168 */ 2169 if (cmd->cbfn) { 2170 cbfn = cmd->cbfn; 2171 cbarg = cmd->cbarg; 2172 cmd->cbfn = NULL; 2173 cbfn(cbarg); 2174 } 2175 } 2176 2177 /* Cleanup any pending requests. */ 2178 static void 2179 bfa_ioc_mbox_flush(struct bfa_ioc *ioc) 2180 { 2181 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2182 struct bfa_mbox_cmd *cmd; 2183 2184 while (!list_empty(&mod->cmd_q)) { 2185 cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe); 2186 list_del(&cmd->qe); 2187 } 2188 } 2189 2190 /** 2191 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap 2192 * 2193 * @ioc: memory for IOC 2194 * @tbuf: app memory to store data from smem 2195 * @soff: smem offset 2196 * @sz: size of smem in bytes 2197 */ 2198 static int 2199 bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz) 2200 { 2201 u32 pgnum, loff, r32; 2202 int i, len; 2203 u32 *buf = tbuf; 2204 2205 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); 2206 loff = PSS_SMEM_PGOFF(soff); 2207 2208 /* 2209 * Hold semaphore to serialize pll init and fwtrc. 2210 */ 2211 if (!bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) 2212 return 1; 2213 2214 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 2215 2216 len = sz/sizeof(u32); 2217 for (i = 0; i < len; i++) { 2218 r32 = swab32(readl(loff + ioc->ioc_regs.smem_page_start)); 2219 buf[i] = be32_to_cpu(r32); 2220 loff += sizeof(u32); 2221 2222 /** 2223 * handle page offset wrap around 2224 */ 2225 loff = PSS_SMEM_PGOFF(loff); 2226 if (loff == 0) { 2227 pgnum++; 2228 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 2229 } 2230 } 2231 2232 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), 2233 ioc->ioc_regs.host_page_num_fn); 2234 2235 /* 2236 * release semaphore 2237 */ 2238 readl(ioc->ioc_regs.ioc_init_sem_reg); 2239 writel(1, ioc->ioc_regs.ioc_init_sem_reg); 2240 return 0; 2241 } 2242 2243 /* Retrieve saved firmware trace from a prior IOC failure. */ 2244 int 2245 bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen) 2246 { 2247 u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id; 2248 int tlen, status = 0; 2249 2250 tlen = *trclen; 2251 if (tlen > BNA_DBG_FWTRC_LEN) 2252 tlen = BNA_DBG_FWTRC_LEN; 2253 2254 status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen); 2255 *trclen = tlen; 2256 return status; 2257 } 2258 2259 /* Save firmware trace if configured. 
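 * Runs at most once per enable (guarded by dbg_fwsave_once) and copies
 * the firmware trace into the buffer claimed earlier through
 * bfa_nw_ioc_debug_memclaim().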
 */
static void
bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
{
	int tlen;

	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = false;
		if (ioc->dbg_fwsave_len) {
			tlen = ioc->dbg_fwsave_len;
			bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
		}
	}
}

/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
	int tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}

static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
	bfa_nw_ioc_debug_save_ftrc(ioc);
}

/* IOCPF to IOC interface */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}

static void
bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}

static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}

static void
bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
}

static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}

/* IOC public */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = true;

	/* Initialize LMEM */
	bfa_ioc_lmem_init(ioc);

	/*
	 * release semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}

/* Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
static enum bfa_status
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
	     u32 boot_env)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	enum bfa_status status;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return BFA_STATUS_FAILED;
	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_NORMAL) {
		drv_fwhdr = (struct bfi_ioc_image_hdr *)
			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
		/* Boot from flash only if the flash f/w is better than
		 * the driver f/w; otherwise push the driver's firmware.
		 */
		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
		    BFI_IOC_IMG_VER_BETTER)
			boot_type = BFI_FWBOOT_TYPE_FLASH;
	}

	/**
	 * Initialize IOC state of all functions on a chip reset.
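	 * Both the current and the alternate (peer) function firmware
	 * states are set: BFI_IOC_MEMTEST for a memory-test boot,
	 * BFI_IOC_INITING otherwise.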
2388 */ 2389 if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) { 2390 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST); 2391 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST); 2392 } else { 2393 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING); 2394 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING); 2395 } 2396 2397 bfa_ioc_msgflush(ioc); 2398 status = bfa_ioc_download_fw(ioc, boot_type, boot_env); 2399 if (status == BFA_STATUS_OK) 2400 bfa_ioc_lpu_start(ioc); 2401 else 2402 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); 2403 2404 return status; 2405 } 2406 2407 /* Enable/disable IOC failure auto recovery. */ 2408 void 2409 bfa_nw_ioc_auto_recover(bool auto_recover) 2410 { 2411 bfa_nw_auto_recover = auto_recover; 2412 } 2413 2414 static bool 2415 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg) 2416 { 2417 u32 *msgp = mbmsg; 2418 u32 r32; 2419 int i; 2420 2421 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); 2422 if ((r32 & 1) == 0) 2423 return false; 2424 2425 /** 2426 * read the MBOX msg 2427 */ 2428 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); 2429 i++) { 2430 r32 = readl(ioc->ioc_regs.lpu_mbox + 2431 i * sizeof(u32)); 2432 msgp[i] = htonl(r32); 2433 } 2434 2435 /** 2436 * turn off mailbox interrupt by clearing mailbox status 2437 */ 2438 writel(1, ioc->ioc_regs.lpu_mbox_cmd); 2439 readl(ioc->ioc_regs.lpu_mbox_cmd); 2440 2441 return true; 2442 } 2443 2444 static void 2445 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) 2446 { 2447 union bfi_ioc_i2h_msg_u *msg; 2448 struct bfa_iocpf *iocpf = &ioc->iocpf; 2449 2450 msg = (union bfi_ioc_i2h_msg_u *) m; 2451 2452 bfa_ioc_stats(ioc, ioc_isrs); 2453 2454 switch (msg->mh.msg_id) { 2455 case BFI_IOC_I2H_HBEAT: 2456 break; 2457 2458 case BFI_IOC_I2H_ENABLE_REPLY: 2459 bfa_ioc_enable_reply(ioc, 2460 (enum bfa_mode)msg->fw_event.port_mode, 2461 msg->fw_event.cap_bm); 2462 break; 2463 2464 case BFI_IOC_I2H_DISABLE_REPLY: 2465 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE); 2466 break; 2467 2468 case BFI_IOC_I2H_GETATTR_REPLY: 2469 bfa_ioc_getattr_reply(ioc); 2470 break; 2471 2472 default: 2473 BUG_ON(1); 2474 } 2475 } 2476 2477 /** 2478 * bfa_nw_ioc_attach - IOC attach time initialization and setup. 2479 * 2480 * @ioc: memory for IOC 2481 * @bfa: driver instance structure 2482 */ 2483 void 2484 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) 2485 { 2486 ioc->bfa = bfa; 2487 ioc->cbfn = cbfn; 2488 ioc->fcmode = false; 2489 ioc->pllinit = false; 2490 ioc->dbg_fwsave_once = true; 2491 ioc->iocpf.ioc = ioc; 2492 2493 bfa_ioc_mbox_attach(ioc); 2494 INIT_LIST_HEAD(&ioc->notify_q); 2495 2496 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 2497 bfa_fsm_send_event(ioc, IOC_E_RESET); 2498 } 2499 2500 /* Driver detach time IOC cleanup. */ 2501 void 2502 bfa_nw_ioc_detach(struct bfa_ioc *ioc) 2503 { 2504 bfa_fsm_send_event(ioc, IOC_E_DETACH); 2505 2506 /* Done with detach, empty the notify_q. */ 2507 INIT_LIST_HEAD(&ioc->notify_q); 2508 } 2509 2510 /** 2511 * bfa_nw_ioc_pci_init - Setup IOC PCI properties. 
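 * @ioc: memory for IOC
 * @clscode: PCI function class code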
2512 * 2513 * @pcidev: PCI device information for this IOC 2514 */ 2515 void 2516 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, 2517 enum bfi_pcifn_class clscode) 2518 { 2519 ioc->clscode = clscode; 2520 ioc->pcidev = *pcidev; 2521 2522 /** 2523 * Initialize IOC and device personality 2524 */ 2525 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC; 2526 ioc->asic_mode = BFI_ASIC_MODE_FC; 2527 2528 switch (pcidev->device_id) { 2529 case PCI_DEVICE_ID_BROCADE_CT: 2530 ioc->asic_gen = BFI_ASIC_GEN_CT; 2531 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; 2532 ioc->asic_mode = BFI_ASIC_MODE_ETH; 2533 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA; 2534 ioc->ad_cap_bm = BFA_CM_CNA; 2535 break; 2536 2537 case BFA_PCI_DEVICE_ID_CT2: 2538 ioc->asic_gen = BFI_ASIC_GEN_CT2; 2539 if (clscode == BFI_PCIFN_CLASS_FC && 2540 pcidev->ssid == BFA_PCI_CT2_SSID_FC) { 2541 ioc->asic_mode = BFI_ASIC_MODE_FC16; 2542 ioc->fcmode = true; 2543 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; 2544 ioc->ad_cap_bm = BFA_CM_HBA; 2545 } else { 2546 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; 2547 ioc->asic_mode = BFI_ASIC_MODE_ETH; 2548 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) { 2549 ioc->port_mode = 2550 ioc->port_mode_cfg = BFA_MODE_CNA; 2551 ioc->ad_cap_bm = BFA_CM_CNA; 2552 } else { 2553 ioc->port_mode = 2554 ioc->port_mode_cfg = BFA_MODE_NIC; 2555 ioc->ad_cap_bm = BFA_CM_NIC; 2556 } 2557 } 2558 break; 2559 2560 default: 2561 BUG_ON(1); 2562 } 2563 2564 /** 2565 * Set asic specific interfaces. 2566 */ 2567 if (ioc->asic_gen == BFI_ASIC_GEN_CT) 2568 bfa_nw_ioc_set_ct_hwif(ioc); 2569 else { 2570 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2); 2571 bfa_nw_ioc_set_ct2_hwif(ioc); 2572 bfa_nw_ioc_ct2_poweron(ioc); 2573 } 2574 2575 bfa_ioc_map_port(ioc); 2576 bfa_ioc_reg_init(ioc); 2577 } 2578 2579 /** 2580 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory 2581 * 2582 * @dm_kva: kernel virtual address of IOC dma memory 2583 * @dm_pa: physical address of IOC dma memory 2584 */ 2585 void 2586 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) 2587 { 2588 /** 2589 * dma memory for firmware attribute 2590 */ 2591 ioc->attr_dma.kva = dm_kva; 2592 ioc->attr_dma.pa = dm_pa; 2593 ioc->attr = (struct bfi_ioc_attr *) dm_kva; 2594 } 2595 2596 /* Return size of dma memory required. */ 2597 u32 2598 bfa_nw_ioc_meminfo(void) 2599 { 2600 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ); 2601 } 2602 2603 void 2604 bfa_nw_ioc_enable(struct bfa_ioc *ioc) 2605 { 2606 bfa_ioc_stats(ioc, ioc_enables); 2607 ioc->dbg_fwsave_once = true; 2608 2609 bfa_fsm_send_event(ioc, IOC_E_ENABLE); 2610 } 2611 2612 void 2613 bfa_nw_ioc_disable(struct bfa_ioc *ioc) 2614 { 2615 bfa_ioc_stats(ioc, ioc_disables); 2616 bfa_fsm_send_event(ioc, IOC_E_DISABLE); 2617 } 2618 2619 /* Initialize memory for saving firmware trace. */ 2620 void 2621 bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave) 2622 { 2623 ioc->dbg_fwsave = dbg_fwsave; 2624 ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? 
BNA_DBG_FWTRC_LEN : 0; 2625 } 2626 2627 static u32 2628 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr) 2629 { 2630 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr); 2631 } 2632 2633 /* Register mailbox message handler function, to be called by common modules */ 2634 void 2635 bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, 2636 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) 2637 { 2638 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2639 2640 mod->mbhdlr[mc].cbfn = cbfn; 2641 mod->mbhdlr[mc].cbarg = cbarg; 2642 } 2643 2644 /** 2645 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware. 2646 * 2647 * @ioc: IOC instance 2648 * @cmd: Mailbox command 2649 * 2650 * Waits if mailbox is busy. Responsibility of caller to serialize 2651 */ 2652 bool 2653 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd, 2654 bfa_mbox_cmd_cbfn_t cbfn, void *cbarg) 2655 { 2656 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2657 u32 stat; 2658 2659 cmd->cbfn = cbfn; 2660 cmd->cbarg = cbarg; 2661 2662 /** 2663 * If a previous command is pending, queue new command 2664 */ 2665 if (!list_empty(&mod->cmd_q)) { 2666 list_add_tail(&cmd->qe, &mod->cmd_q); 2667 return true; 2668 } 2669 2670 /** 2671 * If mailbox is busy, queue command for poll timer 2672 */ 2673 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); 2674 if (stat) { 2675 list_add_tail(&cmd->qe, &mod->cmd_q); 2676 return true; 2677 } 2678 2679 /** 2680 * mailbox is free -- queue command to firmware 2681 */ 2682 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 2683 2684 return false; 2685 } 2686 2687 /* Handle mailbox interrupts */ 2688 void 2689 bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc) 2690 { 2691 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 2692 struct bfi_mbmsg m; 2693 int mc; 2694 2695 if (bfa_ioc_msgget(ioc, &m)) { 2696 /** 2697 * Treat IOC message class as special. 2698 */ 2699 mc = m.mh.msg_class; 2700 if (mc == BFI_MC_IOC) { 2701 bfa_ioc_isr(ioc, &m); 2702 return; 2703 } 2704 2705 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) 2706 return; 2707 2708 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); 2709 } 2710 2711 bfa_ioc_lpu_read_stat(ioc); 2712 2713 /** 2714 * Try to send pending mailbox commands 2715 */ 2716 bfa_ioc_mbox_poll(ioc); 2717 } 2718 2719 void 2720 bfa_nw_ioc_error_isr(struct bfa_ioc *ioc) 2721 { 2722 bfa_ioc_stats(ioc, ioc_hbfails); 2723 bfa_ioc_stats_hb_count(ioc, ioc->hb_count); 2724 bfa_fsm_send_event(ioc, IOC_E_HWERROR); 2725 } 2726 2727 /* return true if IOC is disabled */ 2728 bool 2729 bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc) 2730 { 2731 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) || 2732 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); 2733 } 2734 2735 /* return true if IOC is operational */ 2736 bool 2737 bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) 2738 { 2739 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); 2740 } 2741 2742 /* Add to IOC heartbeat failure notification queue. To be used by common 2743 * modules such as cee, port, diag. 
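 * Entries are struct bfa_ioc_notify nodes initialized with
 * bfa_ioc_notify_init(); their callbacks run from bfa_ioc_event_notify()
 * on IOC events such as BFA_IOC_E_FAILED and BFA_IOC_E_DISABLED.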
 */
void
bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
			   struct bfa_ioc_notify *notify)
{
	list_add_tail(&notify->qe, &ioc->notify_q);
}

#define BFA_MFG_NAME "QLogic"
static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
			 struct bfa_adapter_attr *ad_attr)
{
	struct bfi_ioc_attr *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
	       sizeof(struct bfa_mfg_vpd));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	bfa_nw_ioc_get_mac(ioc, ad_attr->mac);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
}

static enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
		return BFA_IOC_TYPE_LL;

	BUG_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);

	return (ioc->attr->port_mode == BFI_PORT_MODE_FC) ?
		BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}

static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
	memcpy(serial_num,
	       (void *)ioc->attr->brcd_serialnum,
	       BFA_ADAPTER_SERIAL_NUM_LEN);
}

static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

static void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
{
	BUG_ON(!chip_rev);

	memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memcpy(optrom_ver, ioc->attr->optrom_version,
	       BFA_VERSION_LEN);
}

static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
	strscpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	BUG_ON(!model);
	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		 BFA_MFG_NAME, ioc_attr->card_type);
}

static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}
	return ioc_st;
}

void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = bfa_ioc_portid(ioc);
	ioc_attr->port_mode = ioc->port_mode;

	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
	ioc_attr->cap_bm = ioc->ad_cap_bm;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
	ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
	ioc_attr->def_fn = bfa_ioc_is_default(ioc);
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

/* WWN public */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}

void
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac)
{
	ether_addr_copy(mac, ioc->attr->mac);
}

/* Firmware failure detected. Start recovery actions.
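 * Logs the failure, updates heartbeat-failure statistics and sends
 * IOC_E_HBFAIL to the IOC state machine, which applies the configured
 * auto-recovery policy (see bfa_nw_ioc_auto_recover()).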
 */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	pr_crit("Heart Beat of IOC has failed\n");
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

/* BFA IOC PF private functions */

static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}

static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}

static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}

static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}

static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}

void
bfa_nw_iocpf_timeout(struct bfa_ioc *ioc)
{
	enum bfa_iocpf_state iocpf_st;

	iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

	if (iocpf_st == BFA_IOCPF_HWINIT)
		bfa_ioc_poll_fwinit(ioc);
	else
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

void
bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
	} else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		mod_timer(&ioc->iocpf_timer, jiffies +
			  msecs_to_jiffies(BFA_IOC_POLL_TOV));
	}
}

/*
 * Flash module specific
 */

/*
 * The flash DMA buffer should be big enough to hold both the MFG block
 * and the asic block (64k) at the same time, and should be 2k aligned
 * to avoid a write segment crossing a sector boundary.
 */
#define BFA_FLASH_SEG_SZ	2048
#define BFA_FLASH_DMA_BUF_SZ	\
	roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)

static void
bfa_flash_cb(struct bfa_flash *flash)
{
	flash->op_busy = 0;
	if (flash->cbfn)
		flash->cbfn(flash->cbarg, flash->status);
}

static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_flash *flash = cbarg;

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (flash->op_busy) {
			flash->status = BFA_STATUS_IOC_FAILURE;
			flash->cbfn(flash->cbarg, flash->status);
			flash->op_busy = 0;
		}
		break;
	default:
		break;
	}
}

/*
 * Send flash write request.
 */
static void
bfa_flash_write_send(struct bfa_flash *flash)
{
	struct bfi_flash_write_req *msg =
		(struct bfi_flash_write_req *) flash->mb.msg;
	u32 len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
3076 flash->residue : BFA_FLASH_DMA_BUF_SZ; 3077 msg->length = be32_to_cpu(len); 3078 3079 /* indicate if it's the last msg of the whole write operation */ 3080 msg->last = (len == flash->residue) ? 1 : 0; 3081 3082 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ, 3083 bfa_ioc_portid(flash->ioc)); 3084 bfa_alen_set(&msg->alen, len, flash->dbuf_pa); 3085 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len); 3086 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); 3087 3088 flash->residue -= len; 3089 flash->offset += len; 3090 } 3091 3092 /** 3093 * bfa_flash_read_send - Send flash read request. 3094 * 3095 * @cbarg: callback argument 3096 */ 3097 static void 3098 bfa_flash_read_send(void *cbarg) 3099 { 3100 struct bfa_flash *flash = cbarg; 3101 struct bfi_flash_read_req *msg = 3102 (struct bfi_flash_read_req *) flash->mb.msg; 3103 u32 len; 3104 3105 msg->type = be32_to_cpu(flash->type); 3106 msg->instance = flash->instance; 3107 msg->offset = be32_to_cpu(flash->addr_off + flash->offset); 3108 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? 3109 flash->residue : BFA_FLASH_DMA_BUF_SZ; 3110 msg->length = be32_to_cpu(len); 3111 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ, 3112 bfa_ioc_portid(flash->ioc)); 3113 bfa_alen_set(&msg->alen, len, flash->dbuf_pa); 3114 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); 3115 } 3116 3117 /** 3118 * bfa_flash_intr - Process flash response messages upon receiving interrupts. 3119 * 3120 * @flasharg: flash structure 3121 * @msg: message structure 3122 */ 3123 static void 3124 bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg) 3125 { 3126 struct bfa_flash *flash = flasharg; 3127 u32 status; 3128 3129 union { 3130 struct bfi_flash_query_rsp *query; 3131 struct bfi_flash_write_rsp *write; 3132 struct bfi_flash_read_rsp *read; 3133 struct bfi_mbmsg *msg; 3134 } m; 3135 3136 m.msg = msg; 3137 3138 /* receiving response after ioc failure */ 3139 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) 3140 return; 3141 3142 switch (msg->mh.msg_id) { 3143 case BFI_FLASH_I2H_QUERY_RSP: 3144 status = be32_to_cpu(m.query->status); 3145 if (status == BFA_STATUS_OK) { 3146 u32 i; 3147 struct bfa_flash_attr *attr, *f; 3148 3149 attr = (struct bfa_flash_attr *) flash->ubuf; 3150 f = (struct bfa_flash_attr *) flash->dbuf_kva; 3151 attr->status = be32_to_cpu(f->status); 3152 attr->npart = be32_to_cpu(f->npart); 3153 for (i = 0; i < attr->npart; i++) { 3154 attr->part[i].part_type = 3155 be32_to_cpu(f->part[i].part_type); 3156 attr->part[i].part_instance = 3157 be32_to_cpu(f->part[i].part_instance); 3158 attr->part[i].part_off = 3159 be32_to_cpu(f->part[i].part_off); 3160 attr->part[i].part_size = 3161 be32_to_cpu(f->part[i].part_size); 3162 attr->part[i].part_len = 3163 be32_to_cpu(f->part[i].part_len); 3164 attr->part[i].part_status = 3165 be32_to_cpu(f->part[i].part_status); 3166 } 3167 } 3168 flash->status = status; 3169 bfa_flash_cb(flash); 3170 break; 3171 case BFI_FLASH_I2H_WRITE_RSP: 3172 status = be32_to_cpu(m.write->status); 3173 if (status != BFA_STATUS_OK || flash->residue == 0) { 3174 flash->status = status; 3175 bfa_flash_cb(flash); 3176 } else 3177 bfa_flash_write_send(flash); 3178 break; 3179 case BFI_FLASH_I2H_READ_RSP: 3180 status = be32_to_cpu(m.read->status); 3181 if (status != BFA_STATUS_OK) { 3182 flash->status = status; 3183 bfa_flash_cb(flash); 3184 } else { 3185 u32 len = be32_to_cpu(m.read->length); 3186 memcpy(flash->ubuf + flash->offset, 3187 flash->dbuf_kva, len); 3188 flash->residue -= len; 
3189 flash->offset += len; 3190 if (flash->residue == 0) { 3191 flash->status = status; 3192 bfa_flash_cb(flash); 3193 } else 3194 bfa_flash_read_send(flash); 3195 } 3196 break; 3197 case BFI_FLASH_I2H_BOOT_VER_RSP: 3198 case BFI_FLASH_I2H_EVENT: 3199 break; 3200 default: 3201 WARN_ON(1); 3202 } 3203 } 3204 3205 /* 3206 * Flash memory info API. 3207 */ 3208 u32 3209 bfa_nw_flash_meminfo(void) 3210 { 3211 return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 3212 } 3213 3214 /** 3215 * bfa_nw_flash_attach - Flash attach API. 3216 * 3217 * @flash: flash structure 3218 * @ioc: ioc structure 3219 * @dev: device structure 3220 */ 3221 void 3222 bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev) 3223 { 3224 flash->ioc = ioc; 3225 flash->cbfn = NULL; 3226 flash->cbarg = NULL; 3227 flash->op_busy = 0; 3228 3229 bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash); 3230 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash); 3231 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); 3232 } 3233 3234 /** 3235 * bfa_nw_flash_memclaim - Claim memory for flash 3236 * 3237 * @flash: flash structure 3238 * @dm_kva: pointer to virtual memory address 3239 * @dm_pa: physical memory address 3240 */ 3241 void 3242 bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa) 3243 { 3244 flash->dbuf_kva = dm_kva; 3245 flash->dbuf_pa = dm_pa; 3246 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ); 3247 dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 3248 dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 3249 } 3250 3251 /** 3252 * bfa_nw_flash_get_attr - Get flash attribute. 3253 * 3254 * @flash: flash structure 3255 * @attr: flash attribute structure 3256 * @cbfn: callback function 3257 * @cbarg: callback argument 3258 * 3259 * Return status. 3260 */ 3261 enum bfa_status 3262 bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr, 3263 bfa_cb_flash cbfn, void *cbarg) 3264 { 3265 struct bfi_flash_query_req *msg = 3266 (struct bfi_flash_query_req *) flash->mb.msg; 3267 3268 if (!bfa_nw_ioc_is_operational(flash->ioc)) 3269 return BFA_STATUS_IOC_NON_OP; 3270 3271 if (flash->op_busy) 3272 return BFA_STATUS_DEVBUSY; 3273 3274 flash->op_busy = 1; 3275 flash->cbfn = cbfn; 3276 flash->cbarg = cbarg; 3277 flash->ubuf = (u8 *) attr; 3278 3279 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ, 3280 bfa_ioc_portid(flash->ioc)); 3281 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa); 3282 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); 3283 3284 return BFA_STATUS_OK; 3285 } 3286 3287 /** 3288 * bfa_nw_flash_update_part - Update flash partition. 3289 * 3290 * @flash: flash structure 3291 * @type: flash partition type 3292 * @instance: flash partition instance 3293 * @buf: update data buffer 3294 * @len: data buffer length 3295 * @offset: offset relative to the partition starting address 3296 * @cbfn: callback function 3297 * @cbarg: callback argument 3298 * 3299 * Return status. 
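 * (BFA_STATUS_OK only means the first write request was queued;
 * completion of the whole update is reported through @cbfn.)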
3300 */ 3301 enum bfa_status 3302 bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance, 3303 void *buf, u32 len, u32 offset, 3304 bfa_cb_flash cbfn, void *cbarg) 3305 { 3306 if (!bfa_nw_ioc_is_operational(flash->ioc)) 3307 return BFA_STATUS_IOC_NON_OP; 3308 3309 /* 3310 * 'len' must be in word (4-byte) boundary 3311 */ 3312 if (!len || (len & 0x03)) 3313 return BFA_STATUS_FLASH_BAD_LEN; 3314 3315 if (type == BFA_FLASH_PART_MFG) 3316 return BFA_STATUS_EINVAL; 3317 3318 if (flash->op_busy) 3319 return BFA_STATUS_DEVBUSY; 3320 3321 flash->op_busy = 1; 3322 flash->cbfn = cbfn; 3323 flash->cbarg = cbarg; 3324 flash->type = type; 3325 flash->instance = instance; 3326 flash->residue = len; 3327 flash->offset = 0; 3328 flash->addr_off = offset; 3329 flash->ubuf = buf; 3330 3331 bfa_flash_write_send(flash); 3332 3333 return BFA_STATUS_OK; 3334 } 3335 3336 /** 3337 * bfa_nw_flash_read_part - Read flash partition. 3338 * 3339 * @flash: flash structure 3340 * @type: flash partition type 3341 * @instance: flash partition instance 3342 * @buf: read data buffer 3343 * @len: data buffer length 3344 * @offset: offset relative to the partition starting address 3345 * @cbfn: callback function 3346 * @cbarg: callback argument 3347 * 3348 * Return status. 3349 */ 3350 enum bfa_status 3351 bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance, 3352 void *buf, u32 len, u32 offset, 3353 bfa_cb_flash cbfn, void *cbarg) 3354 { 3355 if (!bfa_nw_ioc_is_operational(flash->ioc)) 3356 return BFA_STATUS_IOC_NON_OP; 3357 3358 /* 3359 * 'len' must be in word (4-byte) boundary 3360 */ 3361 if (!len || (len & 0x03)) 3362 return BFA_STATUS_FLASH_BAD_LEN; 3363 3364 if (flash->op_busy) 3365 return BFA_STATUS_DEVBUSY; 3366 3367 flash->op_busy = 1; 3368 flash->cbfn = cbfn; 3369 flash->cbarg = cbarg; 3370 flash->type = type; 3371 flash->instance = instance; 3372 flash->residue = len; 3373 flash->offset = 0; 3374 flash->addr_off = offset; 3375 flash->ubuf = buf; 3376 3377 bfa_flash_read_send(flash); 3378 3379 return BFA_STATUS_OK; 3380 } 3381
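
/*
 * Usage sketch (illustrative only, not part of the driver): how a caller
 * that has attached and DMA-claimed a bfa_flash might read a partition
 * such as BFA_FLASH_PART_FWIMG with bfa_nw_flash_read_part() and wait for
 * the asynchronous completion. The my_flash_cb() helper and the
 * completion-based wait are hypothetical caller-side choices; buf/len
 * must satisfy the word-alignment check and the IOC must be operational.
 *
 *	static void my_flash_cb(void *cbarg, enum bfa_status status)
 *	{
 *		complete((struct completion *)cbarg);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	enum bfa_status status;
 *
 *	status = bfa_nw_flash_read_part(flash, BFA_FLASH_PART_FWIMG, 0,
 *					buf, len, 0, my_flash_cb, &done);
 *	if (status == BFA_STATUS_OK)
 *		wait_for_completion(&done);
 */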