/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

/* IOC local definitions */

/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */

#define bfa_ioc_firmware_lock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
		((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
		((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)	\
		((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
		((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)	\
		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
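/*
 * The macros above dispatch through a per-ASIC ops table, keeping the
 * common IOC code hardware-agnostic. A minimal sketch of the pattern
 * (field names taken from the macros; the real struct bfa_ioc_hwif is
 * declared in bfa_ioc.h, and bfa_hw_cb.c / bfa_hw_ct.c each populate
 * one instance that attach-time code points ioc->ioc_hwif at):
 *
 *	struct bfa_ioc_hwif {
 *		bool (*ioc_firmware_lock)(struct bfa_ioc *ioc);
 *		void (*ioc_firmware_unlock)(struct bfa_ioc *ioc);
 *		void (*ioc_reg_init)(struct bfa_ioc *ioc);
 *		void (*ioc_map_port)(struct bfa_ioc *ioc);
 *		void (*ioc_notify_fail)(struct bfa_ioc *ioc);
 *		bool (*ioc_sync_start)(struct bfa_ioc *ioc);
 *	};
 */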
static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
			enum bfi_fwboot_type boot_type, u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
						char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
						char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
						char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);

/* IOC state machine definitions/declarations */
enum ioc_event {
	IOC_E_RESET		= 1,	/*!< IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*!< IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*!< IOC disable request	*/
	IOC_E_DETACH		= 4,	/*!< driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*!< failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*!< heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*!< hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*!< timeout			*/
	IOC_E_HWFAILED		= 12,	/*!< PCI mapping failure notice	*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);

static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
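/*
 * ioc_sm_table maps a state handler back to the externally visible
 * BFA_IOC_* state code. An illustrative lookup in the style of the
 * driver's bfa_sm_to_state() helper (a sketch, not the shared FSM code):
 *
 *	static enum bfa_ioc_state
 *	ioc_sm_to_state(struct bfa_sm_table *smt, bfa_sm_t sm)
 *	{
 *		int i = 0;
 *
 *		while (smt[i].sm && smt[i].sm != sm)
 *			i++;
 *		return smt[i].state;
 *	}
 */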
/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);

/* IOCPF state machine events */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*!< IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*!< IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*!< stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*!< f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*!< enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*!< disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*!< failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*!< h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*!< f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*!< h/w sem mapping error	*/
};

/* IOCPF states */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*!< IOC is in reset state	*/
	BFA_IOCPF_SEMWAIT	= 2,	/*!< Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*!< IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*!< IOCPF is initialized	*/
	BFA_IOCPF_INITFAIL	= 5,	/*!< IOCPF failed		*/
	BFA_IOCPF_FAIL		= 6,	/*!< IOCPF failed		*/
	BFA_IOCPF_DISABLING	= 7,	/*!< IOCPF is being disabled	*/
	BFA_IOCPF_DISABLED	= 8,	/*!< IOCPF is disabled		*/
	BFA_IOCPF_FWMISMATCH	= 9,	/*!< IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);

static struct bfa_sm_table iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/* IOC State Machine */

/* Beginning state. IOC uninit state. */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}

/* IOC is in uninit state. */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Reset entry actions -- initialize state machine */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/* IOC is in reset state. */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_enable(ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Semaphore should be acquired for version check. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	mod_timer(&ioc->ioc_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_getattr(ioc);
}

/* IOC configuration in progress. Timer is active. */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		del_timer(&ioc->ioc_timer);
		/* fall through */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_getattrfail(ioc);
		break;

	case IOC_E_DISABLE:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
}

static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_iocpf_fail(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_disable(ioc);
}

/* IOC is being disabled */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_iocpf_fail(ioc);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOC disable completion entry. */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}

/* Hardware initialization retry. */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/**
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}

/* IOC failure. */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_HWERROR:
		/* HB failure notification, ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}

/* IOC failure. */
static void
bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOCPF State Machine */

/* Reset entry actions -- initialize state machine */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
	iocpf->fw_mismatch_notified = false;
	iocpf->auto_recover = bfa_nw_auto_recover;
}

/* Beginning state. IOC is in reset state. */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Semaphore should be acquired for version check. */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_init(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Awaiting h/w semaphore to continue with version check. */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				bfa_nw_ioc_hw_sem_release(ioc);
				mod_timer(&ioc->sem_timer, jiffies +
					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
			}
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Notify enable completion callback */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
	/* Call only the first time sm enters fwmismatch state. */
	if (!iocpf->fw_mismatch_notified)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = true;
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
}

/* Awaiting firmware version match. */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Request for semaphore. */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Awaiting semaphore for h/w initialization. */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			mod_timer(&ioc->sem_timer, jiffies +
				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_reset(iocpf->ioc, false);
}

/* Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	/**
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_pf_enabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_disable(iocpf->ioc);
}

/* IOC is being disabled */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* IOC hb ack request is being removed. */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOC disable completion entry. */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_pf_disabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				bfa_nw_ioc_hw_sem_release(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}

/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* BFA IOC private functions */

/* Notify common modules registered for notification. */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
	struct bfa_ioc_notify *notify;

	list_for_each_entry(notify, &ioc->notify_q, qe)
		notify->cbfn(notify->cbarg, event);
}

static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return true;

	return false;
}

void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	readl(sem_reg);
	writel(1, sem_reg);
}
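/*
 * bfa_nw_ioc_sem_get()/bfa_nw_ioc_sem_release() bracket critical hardware
 * access: a read of 0 means the lock was free and is now owned by the
 * reader, and writing 1 releases it. The spin above gives up after
 * BFA_SEM_SPINCNT * 2us (about 6 ms). Illustrative usage:
 *
 *	if (!bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
 *		return;		(never acquired the lock)
 *	... access registers shared across functions ...
 *	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
 */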
/* Clear fwver hdr */
static void
bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
{
	u32 pgnum, pgoff, loff = 0;
	int i;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
		writel(0, ioc->ioc_regs.smem_page_start + loff);
		loff += sizeof(u32);
	}
}

static void
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr;
	u32 fwstate, r32;

	/* Spin on init semaphore to serialize. */
	r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	}

	fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_ioc_fwver_clear(ioc);
	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);

	/* Unlock init semaphore */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32 r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}

void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}

/* Initialize LPU local memory (aka secondary memory / SRAM) */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/* Get driver and firmware versions. */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32 pgnum;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl(loff + ioc->ioc_regs.smem_page_start));
		loff += sizeof(u32);
	}
}

static bool
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
			struct bfi_ioc_image_hdr *fwhdr_2)
{
	int i;

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
			return false;
	}

	return true;
}

/* Returns TRUE if the major, minor, and maintenance versions match.
 * If the patch, phase, and build numbers also match, the MD5 checksums
 * must match as well.
 */
static bool
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
			  struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
		return false;
	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
		return false;
	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
		return false;
	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
		return false;
	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
	    drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
	    drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);

	return true;
}

static bool
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
{
	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
		return false;

	return true;
}

static bool
fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
{
	if (fwhdr->fwver.phase == 0 &&
	    fwhdr->fwver.build == 0)
		return false;

	return true;
}

/* Returns TRUE if both are compatible and the patch level of fwhdr_to_cmp
 * is newer.
 */
static enum bfi_ioc_img_ver_cmp
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
			 struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
	if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp))
		return BFI_IOC_IMG_VER_INCOMP;

	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_OLD;

	/* GA takes priority over internal builds of the same patch stream.
	 * At this point major, minor, maint, and patch numbers are the same.
	 */
	if (fwhdr_is_ga(base_fwhdr))
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_SAME;
		else
			return BFI_IOC_IMG_VER_OLD;
	else
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_BETTER;

	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_OLD;

	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_OLD;

	/* All version numbers are equal.
	 * The MD5 check was already done as part of the compatibility check.
	 */
	return BFI_IOC_IMG_VER_SAME;
}
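/*
 * Worked example of the precedence above, with hypothetical version
 * numbers (major.minor.maint.patch): comparing base 3.2.3.0 against
 * 3.2.3.1 yields BFI_IOC_IMG_VER_BETTER on the higher patch number, and
 * the reverse yields BFI_IOC_IMG_VER_OLD. Only when the patch numbers
 * tie do the GA check and then the phase/build numbers break the tie;
 * an exact match returns BFI_IOC_IMG_VER_SAME, with the MD5 comparison
 * already done inside bfa_ioc_fw_ver_compatible().
 */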
/* register definitions */
#define FLI_CMD_REG			0x0001d000
#define FLI_WRDATA_REG			0x0001d00c
#define FLI_RDDATA_REG			0x0001d010
#define FLI_ADDR_REG			0x0001d004
#define FLI_DEV_STATUS_REG		0x0001d014

#define BFA_FLASH_FIFO_SIZE		128	/* fifo size */
#define BFA_FLASH_CHECK_MAX		10000	/* max # of status check */
#define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op check */
#define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */

#define NFC_STATE_RUNNING		0x20000001
#define NFC_STATE_PAUSED		0x00004560
#define NFC_VER_VALID			0x147

enum bfa_flash_cmd {
	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
	BFA_FLASH_WRITE_ENABLE	= 0x06,	/* write enable */
	BFA_FLASH_SECTOR_ERASE	= 0xd8,	/* sector erase */
	BFA_FLASH_WRITE		= 0x02,	/* write */
	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
};

/* hardware error definition */
enum bfa_flash_err {
	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */
	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */
	BFA_FLASH_BAD		= -3,	/*!< flash bad */
	BFA_FLASH_BUSY		= -4,	/*!< flash busy */
	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */
	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */
	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */
	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */
	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */
};

/* flash command register data structure */
union bfa_flash_cmd_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	act:1;
		u32	rsv:1;
		u32	write_cnt:9;
		u32	read_cnt:9;
		u32	addr_cnt:4;
		u32	cmd:8;
#else
		u32	cmd:8;
		u32	addr_cnt:4;
		u32	read_cnt:9;
		u32	write_cnt:9;
		u32	rsv:1;
		u32	act:1;
#endif
	} r;
	u32	i;
};

/* flash device status register data structure */
union bfa_flash_dev_status_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	rsv:21;
		u32	fifo_cnt:6;
		u32	busy:1;
		u32	init_status:1;
		u32	present:1;
		u32	bad:1;
		u32	good:1;
#else
		u32	good:1;
		u32	bad:1;
		u32	present:1;
		u32	init_status:1;
		u32	busy:1;
		u32	fifo_cnt:6;
		u32	rsv:21;
#endif
	} r;
	u32	i;
};

/* flash address register data structure */
union bfa_flash_addr_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	addr:24;
		u32	dummy:8;
#else
		u32	dummy:8;
		u32	addr:24;
#endif
	} r;
	u32	i;
};

/* Flash raw private functions */
static void
bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
		  u8 rd_cnt, u8 ad_cnt, u8 op)
{
	union bfa_flash_cmd_reg cmd;

	cmd.i = 0;
	cmd.r.act = 1;
	cmd.r.write_cnt = wr_cnt;
	cmd.r.read_cnt = rd_cnt;
	cmd.r.addr_cnt = ad_cnt;
	cmd.r.cmd = op;
	writel(cmd.i, (pci_bar + FLI_CMD_REG));
}

static void
bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
{
	union bfa_flash_addr_reg addr;

	addr.r.addr = address & 0x00ffffff;
	addr.r.dummy = 0;
	writel(addr.i, (pci_bar + FLI_ADDR_REG));
}

static int
bfa_flash_cmd_act_check(void __iomem *pci_bar)
{
	union bfa_flash_cmd_reg cmd;

	cmd.i = readl(pci_bar + FLI_CMD_REG);

	if (cmd.r.act)
		return BFA_FLASH_ERR_CMD_ACT;

	return 0;
}
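/*
 * Example of composing a command with the bitfield union above: the
 * status poll issued by bfa_flash_status_read() below runs
 * bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS), i.e.
 * cmd = 0x05, no address or write bytes, four read bytes, act = 1;
 * hardware clears act when the command completes, which is what
 * bfa_flash_cmd_act_check() polls for.
 */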
/* Flush FLI data fifo. */
static int
bfa_flash_fifo_flush(void __iomem *pci_bar)
{
	u32 i;
	u32 t;
	union bfa_flash_dev_status_reg dev_status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);

	if (!dev_status.r.fifo_cnt)
		return 0;

	/* fifo counter in terms of words */
	for (i = 0; i < dev_status.r.fifo_cnt; i++)
		t = readl(pci_bar + FLI_RDDATA_REG);

	/* Check the device status. It may take some time. */
	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
		if (!dev_status.r.fifo_cnt)
			break;
	}

	if (dev_status.r.fifo_cnt)
		return BFA_FLASH_ERR_FIFO_CNT;

	return 0;
}

/* Read flash status. */
static int
bfa_flash_status_read(void __iomem *pci_bar)
{
	union bfa_flash_dev_status_reg dev_status;
	int status;
	u32 ret_status;
	int i;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);

	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		status = bfa_flash_cmd_act_check(pci_bar);
		if (!status)
			break;
	}

	if (status)
		return status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
	if (!dev_status.r.fifo_cnt)
		return BFA_FLASH_BUSY;

	ret_status = readl(pci_bar + FLI_RDDATA_REG);
	ret_status >>= 24;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	return ret_status;
}

/* Start flash read operation. */
static int
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
		     char *buf)
{
	int status;

	/* len must be a multiple of 4 and not exceed the fifo size */
	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
		return BFA_FLASH_ERR_LEN;

	/* check status */
	status = bfa_flash_status_read(pci_bar);
	if (status == BFA_FLASH_BUSY)
		status = bfa_flash_status_read(pci_bar);

	if (status < 0)
		return status;

	/* check if write-in-progress bit is cleared */
	if (status & BFA_FLASH_WIP_MASK)
		return BFA_FLASH_ERR_WIP;

	bfa_flash_set_addr(pci_bar, offset);

	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);

	return 0;
}

/* Check flash read operation. */
static u32
bfa_flash_read_check(void __iomem *pci_bar)
{
	if (bfa_flash_cmd_act_check(pci_bar))
		return 1;

	return 0;
}

/* End flash read operation. */
static void
bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
{
	u32 i;

	/* read data fifo up to 32 words */
	for (i = 0; i < len; i += 4) {
		u32 w = readl(pci_bar + FLI_RDDATA_REG);
		*((u32 *)(buf + i)) = swab32(w);
	}

	bfa_flash_fifo_flush(pci_bar);
}
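/*
 * The three helpers above form a split transaction. An illustrative
 * caller (bfa_flash_raw_read() below is the real one):
 *
 *	if (bfa_flash_read_start(bar, off, len, buf) < 0)
 *		return error;
 *	n = BFA_FLASH_BLOCKING_OP_MAX;
 *	while (bfa_flash_read_check(bar))	(poll until act clears)
 *		if (--n <= 0)
 *			return error;		(timed out)
 *	bfa_flash_read_end(bar, len, buf);	(drain the fifo)
 */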
/* Perform flash raw read. */

#define FLASH_BLOCKING_OP_MAX	500
#define FLASH_SEM_LOCK_REG	0x18820

static int
bfa_raw_sem_get(void __iomem *bar)
{
	int locked;

	locked = readl(bar + FLASH_SEM_LOCK_REG);

	return !locked;
}

static enum bfa_status
bfa_flash_sem_get(void __iomem *bar)
{
	u32 n = FLASH_BLOCKING_OP_MAX;

	while (!bfa_raw_sem_get(bar)) {
		if (--n <= 0)
			return BFA_STATUS_BADFLASH;
		mdelay(10);
	}
	return BFA_STATUS_OK;
}

static void
bfa_flash_sem_put(void __iomem *bar)
{
	writel(0, (bar + FLASH_SEM_LOCK_REG));
}

static enum bfa_status
bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
		   u32 len)
{
	u32 n;
	int status;
	u32 off, l, s, residue, fifo_sz;

	residue = len;
	off = 0;
	fifo_sz = BFA_FLASH_FIFO_SIZE;
	status = bfa_flash_sem_get(pci_bar);
	if (status != BFA_STATUS_OK)
		return status;

	while (residue) {
		s = offset + off;
		n = s / fifo_sz;
		l = (n + 1) * fifo_sz - s;
		if (l > residue)
			l = residue;

		status = bfa_flash_read_start(pci_bar, offset + off, l,
								&buf[off]);
		if (status < 0) {
			bfa_flash_sem_put(pci_bar);
			return BFA_STATUS_FAILED;
		}

		n = BFA_FLASH_BLOCKING_OP_MAX;
		while (bfa_flash_read_check(pci_bar)) {
			if (--n <= 0) {
				bfa_flash_sem_put(pci_bar);
				return BFA_STATUS_FAILED;
			}
		}

		bfa_flash_read_end(pci_bar, l, &buf[off]);

		residue -= l;
		off += l;
	}
	bfa_flash_sem_put(pci_bar);

	return BFA_STATUS_OK;
}
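/*
 * The length math above aligns each burst to a fifo boundary: with
 * fifo_sz = 128, a read starting at absolute offset s = 200 gives
 * n = 200 / 128 = 1 and l = (1 + 1) * 128 - 200 = 56 bytes, bringing
 * the next iteration to offset 256, a multiple of the fifo size; from
 * then on full 128-byte bursts are issued until the residue runs out.
 */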
#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */

static enum bfa_status
bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
			      u32 *fwimg)
{
	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
}

static enum bfi_ioc_img_ver_cmp
bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
			struct bfi_ioc_image_hdr *base_fwhdr)
{
	struct bfi_ioc_image_hdr *flash_fwhdr;
	enum bfa_status status;
	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];

	status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
	if (status != BFA_STATUS_OK)
		return BFI_IOC_IMG_VER_INCOMP;

	flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
	if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
	else
		return BFI_IOC_IMG_VER_INCOMP;
}

/**
 * Returns TRUE if the driver is willing to work with the current smem
 * f/w version.
 */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;

	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	/* If smem is incompatible or old, the driver should not work with it. */
	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
	    drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
		return false;
	}

	/* If flash has a better f/w than smem, do not work with smem.
	 * If smem f/w == flash f/w, work with it (smem f/w is neither old
	 * nor incompatible at this point).
	 * If flash is old or incompatible, work with smem iff
	 * smem f/w == driver f/w.
	 */
	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);

	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
		return false;
	else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
		return true;
	else
		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
			true : false;
}

/* Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr fwhdr;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (swab32(fwhdr.bootenv) != boot_env)
		return false;

	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}

/* Conditionally flush any pending message from firmware at start. */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;
	u32 boot_env;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	boot_env = BFI_FWBOOT_ENV_OS;

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
		    BFA_STATUS_OK)
			bfa_ioc_poll_fwinit(ioc);

		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
	    BFA_STATUS_OK)
		bfa_ioc_poll_fwinit(ioc);
}
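/*
 * Summary of the bfa_ioc_hwinit() decision tree above:
 *
 *	fwstate UNINIT or f/w invalid -> boot firmware, poll for fwinit
 *	fwstate INITING               -> another function is booting it,
 *	                                 just poll for completion
 *	fwstate DISABLED or OP        -> firmware already running, flush
 *	                                 stale mbox events, send FWREADY
 *	any other fwstate             -> reboot firmware, poll for fwinit
 */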
void
bfa_nw_ioc_timeout(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
		       ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}

static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = htons(ioc->clscode);
	enable_req.rsvd = htons(0);
	/* overflow in 2106 */
	enable_req.tv_sec = ntohl(ktime_get_real_seconds());
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	disable_req.clscode = htons(ioc->clscode);
	disable_req.rsvd = htons(0);
	/* overflow in 2106 */
	disable_req.tv_sec = ntohl(ktime_get_real_seconds());
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

void
bfa_nw_ioc_hb_check(struct bfa_ioc *ioc)
{
	u32 hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}
/* Initiate a full firmware download. */
static enum bfa_status
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;
	u32 fwimg_size;
	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
	enum bfa_status status;

	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);

		status = bfa_nw_ioc_flash_img_get_chnk(ioc,
			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
		if (status != BFA_STATUS_OK)
			return status;

		fwimg = fwimg_buf;
	} else {
		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
	}

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < fwimg_size; i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			if (boot_env == BFI_FWBOOT_ENV_OS &&
			    boot_type == BFI_FWBOOT_TYPE_FLASH) {
				status = bfa_nw_ioc_flash_img_get_chnk(ioc,
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
					fwimg_buf);
				if (status != BFA_STATUS_OK)
					return status;

				fwimg = fwimg_buf;
			} else {
				fwimg = bfa_cb_image_get_chunk(
					bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
			}
		}

		/**
		 * write smem
		 */
		writel(swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]),
		       ioc->ioc_regs.smem_page_start + loff);

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum,
			       ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(bfa_ioc_smem_pgnum(ioc, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	 */
	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
		boot_type = BFI_FWBOOT_TYPE_NORMAL;
	}
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				      ioc->port0_mode, ioc->port1_mode);
	writel(asicmode, ((ioc->ioc_regs.smem_page_start)
			  + BFI_FWBOOT_DEVMODE_OFF));
	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
			   + (BFI_FWBOOT_TYPE_OFF)));
	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
			  + (BFI_FWBOOT_ENV_OFF)));
	return BFA_STATUS_OK;
}
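/*
 * SMEM is written through a page window: loff is folded by
 * PSS_SMEM_PGOFF() and, on wrap to 0, pgnum is advanced and rewritten
 * to host_page_num_fn so the next writel() lands at the start of the
 * following page. Worked example, assuming (for illustration only) a
 * 32KB page: after the word at page offset 0x7ffc, loff reaches 0x8000,
 * folds back to 0, and the window is moved one page forward. The real
 * page size is whatever the PSS_SMEM_PGOFF() mask encodes.
 */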
static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
        bfa_ioc_hwinit(ioc, force);
}

/* BFA ioc enable reply by firmware */
static void
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
                     u8 cap_bm)
{
        struct bfa_iocpf *iocpf = &ioc->iocpf;

        ioc->port_mode = ioc->port_mode_cfg = port_mode;
        ioc->ad_cap_bm = cap_bm;
        bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
}

/* Update BFA configuration from firmware configuration. */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
        struct bfi_ioc_attr *attr = ioc->attr;

        attr->adapter_prop = ntohl(attr->adapter_prop);
        attr->card_type = ntohl(attr->card_type);
        attr->maxfrsize = ntohs(attr->maxfrsize);

        bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/* Attach time initialization of mbox logic. */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
        struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
        int mc;

        INIT_LIST_HEAD(&mod->cmd_q);
        for (mc = 0; mc < BFI_MC_MAX; mc++) {
                mod->mbhdlr[mc].cbfn = NULL;
                mod->mbhdlr[mc].cbarg = ioc->bfa;
        }
}

/* Mbox poll timer -- restarts any pending mailbox requests. */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
        struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
        struct bfa_mbox_cmd *cmd;
        bfa_mbox_cmd_cbfn_t cbfn;
        void *cbarg;
        u32 stat;

        /*
         * If no command pending, do nothing
         */
        if (list_empty(&mod->cmd_q))
                return;

        /*
         * If previous command is not yet fetched by firmware, do nothing
         */
        stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
        if (stat)
                return;

        /*
         * Enqueue command to firmware.
         */
        cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
        list_del(&cmd->qe);
        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

        /*
         * Give a callback to the client, indicating that the command is sent
         */
        if (cmd->cbfn) {
                cbfn = cmd->cbfn;
                cbarg = cmd->cbarg;
                cmd->cbfn = NULL;
                cbfn(cbarg);
        }
}

/* Cleanup any pending requests. */
static void
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
        struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
        struct bfa_mbox_cmd *cmd;

        while (!list_empty(&mod->cmd_q)) {
                cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
                list_del(&cmd->qe);
        }
}

/**
 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
 *
 * @ioc: IOC instance
 * @tbuf: host memory to receive the SMEM data
 * @soff: smem offset
 * @sz: size to read, in bytes
 */
static int
bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
{
        u32 pgnum, loff, r32;
        int i, len;
        u32 *buf = tbuf;

        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
        loff = PSS_SMEM_PGOFF(soff);

        /*
         * Hold semaphore to serialize pll init and fwtrc.
         */
        if (!bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
                return 1;

        writel(pgnum, ioc->ioc_regs.host_page_num_fn);

        len = sz/sizeof(u32);
        for (i = 0; i < len; i++) {
                r32 = swab32(readl(loff + ioc->ioc_regs.smem_page_start));
                buf[i] = be32_to_cpu(r32);
                loff += sizeof(u32);

                /*
                 * handle page offset wrap around
                 */
                loff = PSS_SMEM_PGOFF(loff);
                if (loff == 0) {
                        pgnum++;
                        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
                }
        }

        writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
               ioc->ioc_regs.host_page_num_fn);

        /*
         * release semaphore
         */
        readl(ioc->ioc_regs.ioc_init_sem_reg);
        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
        return 0;
}

/* Read the firmware trace from SMEM. */
int
bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
        u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
        int tlen, status = 0;

        tlen = *trclen;
        if (tlen > BNA_DBG_FWTRC_LEN)
                tlen = BNA_DBG_FWTRC_LEN;

        status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
        *trclen = tlen;
        return status;
}
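/*
 * Illustrative sketch (not part of the driver): pulling the firmware
 * trace into a caller-supplied buffer. The length is passed in/out, so
 * the caller learns how many bytes were actually copied. "buf" is a
 * hypothetical local:
 *
 *      u8 buf[BNA_DBG_FWTRC_LEN];
 *      int len = sizeof(buf);
 *
 *      if (bfa_nw_ioc_debug_fwtrc(ioc, buf, &len) == 0)
 *              pr_info("fetched %d bytes of fw trace\n", len);
 */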
/* Save firmware trace if configured. */
static void
bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
{
        int tlen;

        if (ioc->dbg_fwsave_once) {
                ioc->dbg_fwsave_once = false;
                if (ioc->dbg_fwsave_len) {
                        tlen = ioc->dbg_fwsave_len;
                        bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
                }
        }
}

/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
        int tlen;

        if (ioc->dbg_fwsave_len == 0)
                return BFA_STATUS_ENOFSAVE;

        tlen = *trclen;
        if (tlen > ioc->dbg_fwsave_len)
                tlen = ioc->dbg_fwsave_len;

        memcpy(trcdata, ioc->dbg_fwsave, tlen);
        *trclen = tlen;
        return BFA_STATUS_OK;
}

static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
        /*
         * Notify driver and common modules registered for notification.
         */
        ioc->cbfn->hbfail_cbfn(ioc->bfa);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
        bfa_nw_ioc_debug_save_ftrc(ioc);
}

/* IOCPF to IOC interface */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}

static void
bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}

static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}

static void
bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
}

static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
{
        /*
         * Provide enable completion callback and AEN notification.
         */
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}

/* IOC public */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
        /*
         * Hold semaphore so that nobody can access the chip during init.
         */
        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

        bfa_ioc_pll_init_asic(ioc);

        ioc->pllinit = true;

        /* Initialize LMEM */
        bfa_ioc_lmem_init(ioc);

        /*
         * release semaphore.
         */
        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

        return BFA_STATUS_OK;
}
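/*
 * Illustrative note (not part of the driver): chip-level critical
 * sections in this file follow the same hardware-semaphore pattern as
 * bfa_ioc_pll_init() above -- acquire, touch the chip, release:
 *
 *      bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
 *      // ... register/SMEM accesses that must not interleave with
 *      //     the other PCI function ...
 *      bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
 *
 * bfa_nw_ioc_smem_read() uses the non-blocking variant and simply bails
 * out if the semaphore is unavailable.
 */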
/* Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
static enum bfa_status
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
             u32 boot_env)
{
        struct bfi_ioc_image_hdr *drv_fwhdr;
        enum bfa_status status;

        bfa_ioc_stats(ioc, ioc_boots);

        if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
                return BFA_STATUS_FAILED;
        if (boot_env == BFI_FWBOOT_ENV_OS &&
            boot_type == BFI_FWBOOT_TYPE_NORMAL) {
                drv_fwhdr = (struct bfi_ioc_image_hdr *)
                        bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
                /* Boot from flash only if the flash firmware is better
                 * than the driver's bundled firmware; otherwise download
                 * the driver firmware.
                 */
                if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
                    BFI_IOC_IMG_VER_BETTER)
                        boot_type = BFI_FWBOOT_TYPE_FLASH;
        }

        /*
         * Initialize IOC state of all functions on a chip reset.
         */
        if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
                bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
        } else {
                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
                bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
        }

        bfa_ioc_msgflush(ioc);
        status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
        if (status == BFA_STATUS_OK)
                bfa_ioc_lpu_start(ioc);
        else
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);

        return status;
}

/* Enable/disable IOC failure auto recovery. */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
        bfa_nw_auto_recover = auto_recover;
}

static bool
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
        u32 *msgp = mbmsg;
        u32 r32;
        int i;

        r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
        if ((r32 & 1) == 0)
                return false;

        /*
         * read the MBOX msg
         */
        for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
             i++) {
                r32 = readl(ioc->ioc_regs.lpu_mbox +
                            i * sizeof(u32));
                msgp[i] = htonl(r32);
        }

        /*
         * turn off mailbox interrupt by clearing mailbox status
         */
        writel(1, ioc->ioc_regs.lpu_mbox_cmd);
        readl(ioc->ioc_regs.lpu_mbox_cmd);

        return true;
}

static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
        union bfi_ioc_i2h_msg_u *msg;
        struct bfa_iocpf *iocpf = &ioc->iocpf;

        msg = (union bfi_ioc_i2h_msg_u *) m;

        bfa_ioc_stats(ioc, ioc_isrs);

        switch (msg->mh.msg_id) {
        case BFI_IOC_I2H_HBEAT:
                break;

        case BFI_IOC_I2H_ENABLE_REPLY:
                bfa_ioc_enable_reply(ioc,
                        (enum bfa_mode)msg->fw_event.port_mode,
                        msg->fw_event.cap_bm);
                break;

        case BFI_IOC_I2H_DISABLE_REPLY:
                bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
                break;

        case BFI_IOC_I2H_GETATTR_REPLY:
                bfa_ioc_getattr_reply(ioc);
                break;

        default:
                BUG_ON(1);
        }
}

/**
 * bfa_nw_ioc_attach - IOC attach time initialization and setup.
 *
 * @ioc: memory for IOC
 * @bfa: driver instance structure
 * @cbfn: callback function pointers
 */
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
{
        ioc->bfa = bfa;
        ioc->cbfn = cbfn;
        ioc->fcmode = false;
        ioc->pllinit = false;
        ioc->dbg_fwsave_once = true;
        ioc->iocpf.ioc = ioc;

        bfa_ioc_mbox_attach(ioc);
        INIT_LIST_HEAD(&ioc->notify_q);

        bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
        bfa_fsm_send_event(ioc, IOC_E_RESET);
}

/* Driver detach time IOC cleanup. */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(ioc, IOC_E_DETACH);

        /* Done with detach, empty the notify_q. */
        INIT_LIST_HEAD(&ioc->notify_q);
}
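/*
 * Illustrative note (not part of the driver): attach parks the IOC state
 * machine rather than starting hardware. bfa_nw_ioc_attach() sets the
 * initial state and resets the FSM,
 *
 *      bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 *      bfa_fsm_send_event(ioc, IOC_E_RESET);   // uninit -> reset
 *
 * so when it returns the FSM is idle, waiting for the IOC_E_ENABLE event
 * that bfa_nw_ioc_enable() sends later.
 */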
2515 * 2516 * @pcidev: PCI device information for this IOC 2517 */ 2518 void 2519 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, 2520 enum bfi_pcifn_class clscode) 2521 { 2522 ioc->clscode = clscode; 2523 ioc->pcidev = *pcidev; 2524 2525 /** 2526 * Initialize IOC and device personality 2527 */ 2528 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC; 2529 ioc->asic_mode = BFI_ASIC_MODE_FC; 2530 2531 switch (pcidev->device_id) { 2532 case PCI_DEVICE_ID_BROCADE_CT: 2533 ioc->asic_gen = BFI_ASIC_GEN_CT; 2534 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; 2535 ioc->asic_mode = BFI_ASIC_MODE_ETH; 2536 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA; 2537 ioc->ad_cap_bm = BFA_CM_CNA; 2538 break; 2539 2540 case BFA_PCI_DEVICE_ID_CT2: 2541 ioc->asic_gen = BFI_ASIC_GEN_CT2; 2542 if (clscode == BFI_PCIFN_CLASS_FC && 2543 pcidev->ssid == BFA_PCI_CT2_SSID_FC) { 2544 ioc->asic_mode = BFI_ASIC_MODE_FC16; 2545 ioc->fcmode = true; 2546 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; 2547 ioc->ad_cap_bm = BFA_CM_HBA; 2548 } else { 2549 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; 2550 ioc->asic_mode = BFI_ASIC_MODE_ETH; 2551 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) { 2552 ioc->port_mode = 2553 ioc->port_mode_cfg = BFA_MODE_CNA; 2554 ioc->ad_cap_bm = BFA_CM_CNA; 2555 } else { 2556 ioc->port_mode = 2557 ioc->port_mode_cfg = BFA_MODE_NIC; 2558 ioc->ad_cap_bm = BFA_CM_NIC; 2559 } 2560 } 2561 break; 2562 2563 default: 2564 BUG_ON(1); 2565 } 2566 2567 /** 2568 * Set asic specific interfaces. 2569 */ 2570 if (ioc->asic_gen == BFI_ASIC_GEN_CT) 2571 bfa_nw_ioc_set_ct_hwif(ioc); 2572 else { 2573 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2); 2574 bfa_nw_ioc_set_ct2_hwif(ioc); 2575 bfa_nw_ioc_ct2_poweron(ioc); 2576 } 2577 2578 bfa_ioc_map_port(ioc); 2579 bfa_ioc_reg_init(ioc); 2580 } 2581 2582 /** 2583 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory 2584 * 2585 * @dm_kva: kernel virtual address of IOC dma memory 2586 * @dm_pa: physical address of IOC dma memory 2587 */ 2588 void 2589 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) 2590 { 2591 /** 2592 * dma memory for firmware attribute 2593 */ 2594 ioc->attr_dma.kva = dm_kva; 2595 ioc->attr_dma.pa = dm_pa; 2596 ioc->attr = (struct bfi_ioc_attr *) dm_kva; 2597 } 2598 2599 /* Return size of dma memory required. */ 2600 u32 2601 bfa_nw_ioc_meminfo(void) 2602 { 2603 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ); 2604 } 2605 2606 void 2607 bfa_nw_ioc_enable(struct bfa_ioc *ioc) 2608 { 2609 bfa_ioc_stats(ioc, ioc_enables); 2610 ioc->dbg_fwsave_once = true; 2611 2612 bfa_fsm_send_event(ioc, IOC_E_ENABLE); 2613 } 2614 2615 void 2616 bfa_nw_ioc_disable(struct bfa_ioc *ioc) 2617 { 2618 bfa_ioc_stats(ioc, ioc_disables); 2619 bfa_fsm_send_event(ioc, IOC_E_DISABLE); 2620 } 2621 2622 /* Initialize memory for saving firmware trace. */ 2623 void 2624 bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave) 2625 { 2626 ioc->dbg_fwsave = dbg_fwsave; 2627 ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? 
static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
        return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

/* Register mailbox message handler function, to be called by common modules */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
                       bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
        struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;

        mod->mbhdlr[mc].cbfn = cbfn;
        mod->mbhdlr[mc].cbarg = cbarg;
}

/**
 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
 *
 * @ioc: IOC instance
 * @cmd: Mailbox command
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * If the mailbox is busy, the command is queued and sent later from the
 * poll timer. Returns true if the command was queued, false if it was
 * handed to the firmware immediately. The caller must serialize access.
 */
bool
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
                      bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
{
        struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
        u32 stat;

        cmd->cbfn = cbfn;
        cmd->cbarg = cbarg;

        /*
         * If a previous command is pending, queue new command
         */
        if (!list_empty(&mod->cmd_q)) {
                list_add_tail(&cmd->qe, &mod->cmd_q);
                return true;
        }

        /*
         * If mailbox is busy, queue command for poll timer
         */
        stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
        if (stat) {
                list_add_tail(&cmd->qe, &mod->cmd_q);
                return true;
        }

        /*
         * mailbox is free -- queue command to firmware
         */
        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

        return false;
}
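/*
 * Illustrative sketch (not part of the driver): how a client module
 * submits a request through the mailbox. "my_state" and "my_sent_cb" are
 * hypothetical names; real users embed a struct bfa_mbox_cmd in their own
 * state, fill cmd->msg with a BFI message, and queue it:
 *
 *      struct bfa_mbox_cmd *cmd = &my_state.mb;
 *      struct bfi_ioc_ctrl_req *req = (struct bfi_ioc_ctrl_req *)cmd->msg;
 *
 *      bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
 *                  bfa_ioc_portid(ioc));
 *      if (bfa_nw_ioc_mbox_queue(ioc, cmd, my_sent_cb, &my_state))
 *              ;       // queued -- my_sent_cb runs once the mbox frees up
 */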
2747 */ 2748 void 2749 bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, 2750 struct bfa_ioc_notify *notify) 2751 { 2752 list_add_tail(¬ify->qe, &ioc->notify_q); 2753 } 2754 2755 #define BFA_MFG_NAME "QLogic" 2756 static void 2757 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc, 2758 struct bfa_adapter_attr *ad_attr) 2759 { 2760 struct bfi_ioc_attr *ioc_attr; 2761 2762 ioc_attr = ioc->attr; 2763 2764 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); 2765 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); 2766 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); 2767 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); 2768 memcpy(&ad_attr->vpd, &ioc_attr->vpd, 2769 sizeof(struct bfa_mfg_vpd)); 2770 2771 ad_attr->nports = bfa_ioc_get_nports(ioc); 2772 ad_attr->max_speed = bfa_ioc_speed_sup(ioc); 2773 2774 bfa_ioc_get_adapter_model(ioc, ad_attr->model); 2775 /* For now, model descr uses same model string */ 2776 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); 2777 2778 ad_attr->card_type = ioc_attr->card_type; 2779 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type); 2780 2781 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) 2782 ad_attr->prototype = 1; 2783 else 2784 ad_attr->prototype = 0; 2785 2786 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); 2787 bfa_nw_ioc_get_mac(ioc, ad_attr->mac); 2788 2789 ad_attr->pcie_gen = ioc_attr->pcie_gen; 2790 ad_attr->pcie_lanes = ioc_attr->pcie_lanes; 2791 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; 2792 ad_attr->asic_rev = ioc_attr->asic_rev; 2793 2794 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); 2795 } 2796 2797 static enum bfa_ioc_type 2798 bfa_ioc_get_type(struct bfa_ioc *ioc) 2799 { 2800 if (ioc->clscode == BFI_PCIFN_CLASS_ETH) 2801 return BFA_IOC_TYPE_LL; 2802 2803 BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC)); 2804 2805 return (ioc->attr->port_mode == BFI_PORT_MODE_FC) 2806 ? 
#define BFA_MFG_NAME "QLogic"
static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
                         struct bfa_adapter_attr *ad_attr)
{
        struct bfi_ioc_attr *ioc_attr;

        ioc_attr = ioc->attr;

        bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
        bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
        bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
        bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
        memcpy(&ad_attr->vpd, &ioc_attr->vpd,
               sizeof(struct bfa_mfg_vpd));

        ad_attr->nports = bfa_ioc_get_nports(ioc);
        ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

        bfa_ioc_get_adapter_model(ioc, ad_attr->model);
        /* For now, the model description reuses the model string. */
        bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

        ad_attr->card_type = ioc_attr->card_type;
        ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

        if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
                ad_attr->prototype = 1;
        else
                ad_attr->prototype = 0;

        ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
        bfa_nw_ioc_get_mac(ioc, ad_attr->mac);

        ad_attr->pcie_gen = ioc_attr->pcie_gen;
        ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
        ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
        ad_attr->asic_rev = ioc_attr->asic_rev;

        bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
}

static enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
        if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
                return BFA_IOC_TYPE_LL;

        BUG_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);

        return (ioc->attr->port_mode == BFI_PORT_MODE_FC) ?
                BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}

static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
        memcpy(serial_num,
               (void *)ioc->attr->brcd_serialnum,
               BFA_ADAPTER_SERIAL_NUM_LEN);
}

static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
        memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

static void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
{
        BUG_ON(!chip_rev);

        memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

        chip_rev[0] = 'R';
        chip_rev[1] = 'e';
        chip_rev[2] = 'v';
        chip_rev[3] = '-';
        chip_rev[4] = ioc->attr->asic_rev;
        chip_rev[5] = '\0';
}

static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
        memcpy(optrom_ver, ioc->attr->optrom_version,
               BFA_VERSION_LEN);
}

static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
        strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
        struct bfi_ioc_attr *ioc_attr;

        BUG_ON(!model);
        memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

        ioc_attr = ioc->attr;

        snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
                 BFA_MFG_NAME, ioc_attr->card_type);
}

static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
        enum bfa_iocpf_state iocpf_st;
        enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

        if (ioc_st == BFA_IOC_ENABLING ||
            ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

                iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

                switch (iocpf_st) {
                case BFA_IOCPF_SEMWAIT:
                        ioc_st = BFA_IOC_SEMWAIT;
                        break;

                case BFA_IOCPF_HWINIT:
                        ioc_st = BFA_IOC_HWINIT;
                        break;

                case BFA_IOCPF_FWMISMATCH:
                        ioc_st = BFA_IOC_FWMISMATCH;
                        break;

                case BFA_IOCPF_FAIL:
                        ioc_st = BFA_IOC_FAIL;
                        break;

                case BFA_IOCPF_INITFAIL:
                        ioc_st = BFA_IOC_INITFAIL;
                        break;

                default:
                        break;
                }
        }
        return ioc_st;
}

void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
        memset(ioc_attr, 0, sizeof(struct bfa_ioc_attr));

        ioc_attr->state = bfa_ioc_get_state(ioc);
        ioc_attr->port_id = bfa_ioc_portid(ioc);
        ioc_attr->port_mode = ioc->port_mode;
        ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
        ioc_attr->cap_bm = ioc->ad_cap_bm;

        ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

        bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

        ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
        ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
        ioc_attr->def_fn = bfa_ioc_is_default(ioc);
        bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

/* WWN public */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
        return ioc->attr->pwwn;
}

void
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac)
{
        ether_addr_copy(mac, ioc->attr->mac);
}
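/*
 * Illustrative sketch (not part of the driver): an ethtool-style query
 * path would snapshot the IOC state into a caller-owned attribute
 * structure ("attr" here is a hypothetical local):
 *
 *      struct bfa_ioc_attr attr;
 *
 *      bfa_nw_ioc_get_attr(ioc, &attr);
 *      pr_info("IOC state %d, model %s\n",
 *              attr.state, attr.adapter_attr.model);
 */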
/* Firmware failure detected. Start recovery actions. */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
        pr_crit("Heart Beat of IOC has failed\n");
        bfa_ioc_stats(ioc, ioc_hbfails);
        bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
        bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

/* BFA IOC PF private functions */

static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}

static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}

static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}

static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}

static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}

void
bfa_nw_iocpf_timeout(struct bfa_ioc *ioc)
{
        enum bfa_iocpf_state iocpf_st;

        iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

        if (iocpf_st == BFA_IOCPF_HWINIT)
                bfa_ioc_poll_fwinit(ioc);
        else
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

void
bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc)
{
        bfa_ioc_hw_sem_get(ioc);
}

static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
        u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

        if (fwstate == BFI_IOC_DISABLED) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
                return;
        }

        if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
        } else {
                ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
                mod_timer(&ioc->iocpf_timer, jiffies +
                          msecs_to_jiffies(BFA_IOC_POLL_TOV));
        }
}

/*
 * Flash module specific
 */

/*
 * The flash DMA buffer must be big enough to hold both the MFG block and
 * an ASIC block (64 KB) at the same time, and must be 2 KB aligned so
 * that no write segment crosses a sector boundary.
 */
#define BFA_FLASH_SEG_SZ        2048
#define BFA_FLASH_DMA_BUF_SZ    \
        roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
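/*
 * Illustrative arithmetic (not part of the driver): with a hypothetical
 * sizeof(struct bfa_mfg_block) of 512 bytes, the macro works out to
 *
 *      roundup(0x10000 + 512, 2048) = roundup(66048, 2048) = 67584
 *
 * i.e. the 64 KB ASIC block plus the MFG block, padded up to the next
 * 2 KB segment so chunked writes stay sector-aligned.
 */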
static void
bfa_flash_cb(struct bfa_flash *flash)
{
        flash->op_busy = 0;
        if (flash->cbfn)
                flash->cbfn(flash->cbarg, flash->status);
}

static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
{
        struct bfa_flash *flash = cbarg;

        switch (event) {
        case BFA_IOC_E_DISABLED:
        case BFA_IOC_E_FAILED:
                if (flash->op_busy) {
                        flash->status = BFA_STATUS_IOC_FAILURE;
                        flash->cbfn(flash->cbarg, flash->status);
                        flash->op_busy = 0;
                }
                break;
        default:
                break;
        }
}

/*
 * Send flash write request.
 */
static void
bfa_flash_write_send(struct bfa_flash *flash)
{
        struct bfi_flash_write_req *msg =
                (struct bfi_flash_write_req *) flash->mb.msg;
        u32 len;

        msg->type = be32_to_cpu(flash->type);
        msg->instance = flash->instance;
        msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
        len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
                flash->residue : BFA_FLASH_DMA_BUF_SZ;
        msg->length = be32_to_cpu(len);

        /* indicate if it's the last msg of the whole write operation */
        msg->last = (len == flash->residue) ? 1 : 0;

        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
                    bfa_ioc_portid(flash->ioc));
        bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
        memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
        bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);

        flash->residue -= len;
        flash->offset += len;
}

/**
 * bfa_flash_read_send - Send flash read request.
 *
 * @cbarg: callback argument
 */
static void
bfa_flash_read_send(void *cbarg)
{
        struct bfa_flash *flash = cbarg;
        struct bfi_flash_read_req *msg =
                (struct bfi_flash_read_req *) flash->mb.msg;
        u32 len;

        msg->type = be32_to_cpu(flash->type);
        msg->instance = flash->instance;
        msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
        len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
                flash->residue : BFA_FLASH_DMA_BUF_SZ;
        msg->length = be32_to_cpu(len);
        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
                    bfa_ioc_portid(flash->ioc));
        bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
        bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
}
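/*
 * Illustrative note (not part of the driver): reads and writes are
 * chunked by the DMA buffer size, so a transfer of "len" bytes takes
 * DIV_ROUND_UP(len, BFA_FLASH_DMA_BUF_SZ) request/response round trips.
 * residue/offset track the remaining and completed bytes, and
 * bfa_flash_intr() below re-issues the next chunk until residue reaches
 * zero.
 */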
/**
 * bfa_flash_intr - Process flash response messages upon receiving interrupts.
 *
 * @flasharg: flash structure
 * @msg: message structure
 */
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
{
        struct bfa_flash *flash = flasharg;
        u32 status;

        union {
                struct bfi_flash_query_rsp *query;
                struct bfi_flash_write_rsp *write;
                struct bfi_flash_read_rsp *read;
                struct bfi_mbmsg *msg;
        } m;

        m.msg = msg;

        /* receiving response after ioc failure */
        if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
                return;

        switch (msg->mh.msg_id) {
        case BFI_FLASH_I2H_QUERY_RSP:
                status = be32_to_cpu(m.query->status);
                if (status == BFA_STATUS_OK) {
                        u32 i;
                        struct bfa_flash_attr *attr, *f;

                        attr = (struct bfa_flash_attr *) flash->ubuf;
                        f = (struct bfa_flash_attr *) flash->dbuf_kva;
                        attr->status = be32_to_cpu(f->status);
                        attr->npart = be32_to_cpu(f->npart);
                        for (i = 0; i < attr->npart; i++) {
                                attr->part[i].part_type =
                                        be32_to_cpu(f->part[i].part_type);
                                attr->part[i].part_instance =
                                        be32_to_cpu(f->part[i].part_instance);
                                attr->part[i].part_off =
                                        be32_to_cpu(f->part[i].part_off);
                                attr->part[i].part_size =
                                        be32_to_cpu(f->part[i].part_size);
                                attr->part[i].part_len =
                                        be32_to_cpu(f->part[i].part_len);
                                attr->part[i].part_status =
                                        be32_to_cpu(f->part[i].part_status);
                        }
                }
                flash->status = status;
                bfa_flash_cb(flash);
                break;
        case BFI_FLASH_I2H_WRITE_RSP:
                status = be32_to_cpu(m.write->status);
                if (status != BFA_STATUS_OK || flash->residue == 0) {
                        flash->status = status;
                        bfa_flash_cb(flash);
                } else
                        bfa_flash_write_send(flash);
                break;
        case BFI_FLASH_I2H_READ_RSP:
                status = be32_to_cpu(m.read->status);
                if (status != BFA_STATUS_OK) {
                        flash->status = status;
                        bfa_flash_cb(flash);
                } else {
                        u32 len = be32_to_cpu(m.read->length);

                        memcpy(flash->ubuf + flash->offset,
                               flash->dbuf_kva, len);
                        flash->residue -= len;
                        flash->offset += len;
                        if (flash->residue == 0) {
                                flash->status = status;
                                bfa_flash_cb(flash);
                        } else
                                bfa_flash_read_send(flash);
                }
                break;
        case BFI_FLASH_I2H_BOOT_VER_RSP:
        case BFI_FLASH_I2H_EVENT:
                break;
        default:
                WARN_ON(1);
        }
}

/*
 * Flash memory info API.
 */
u32
bfa_nw_flash_meminfo(void)
{
        return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

/**
 * bfa_nw_flash_attach - Flash attach API.
 *
 * @flash: flash structure
 * @ioc: ioc structure
 * @dev: device structure
 */
void
bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
{
        flash->ioc = ioc;
        flash->cbfn = NULL;
        flash->cbarg = NULL;
        flash->op_busy = 0;

        bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
        bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
        list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
}

/**
 * bfa_nw_flash_memclaim - Claim memory for flash
 *
 * @flash: flash structure
 * @dm_kva: pointer to virtual memory address
 * @dm_pa: physical memory address
 */
void
bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
{
        flash->dbuf_kva = dm_kva;
        flash->dbuf_pa = dm_pa;
        memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
        dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
        dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

/**
 * bfa_nw_flash_get_attr - Get flash attribute.
 *
 * @flash: flash structure
 * @attr: flash attribute structure
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
                      bfa_cb_flash cbfn, void *cbarg)
{
        struct bfi_flash_query_req *msg =
                (struct bfi_flash_query_req *) flash->mb.msg;

        if (!bfa_nw_ioc_is_operational(flash->ioc))
                return BFA_STATUS_IOC_NON_OP;

        if (flash->op_busy)
                return BFA_STATUS_DEVBUSY;

        flash->op_busy = 1;
        flash->cbfn = cbfn;
        flash->cbarg = cbarg;
        flash->ubuf = (u8 *) attr;

        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
                    bfa_ioc_portid(flash->ioc));
        bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
        bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);

        return BFA_STATUS_OK;
}
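/*
 * Illustrative sketch (not part of the driver): the flash APIs are
 * asynchronous -- the call only starts the operation, and completion is
 * reported through the supplied callback. "my_flash_done" and "my_attr"
 * are hypothetical:
 *
 *      static void my_flash_done(void *cbarg, enum bfa_status status)
 *      {
 *              if (status == BFA_STATUS_OK)
 *                      ;       // attr buffer passed earlier is now valid
 *      }
 *
 *      if (bfa_nw_flash_get_attr(flash, &my_attr, my_flash_done, me) !=
 *          BFA_STATUS_OK)
 *              ;       // busy or IOC not operational -- nothing started
 */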
3303 */ 3304 enum bfa_status 3305 bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance, 3306 void *buf, u32 len, u32 offset, 3307 bfa_cb_flash cbfn, void *cbarg) 3308 { 3309 if (!bfa_nw_ioc_is_operational(flash->ioc)) 3310 return BFA_STATUS_IOC_NON_OP; 3311 3312 /* 3313 * 'len' must be in word (4-byte) boundary 3314 */ 3315 if (!len || (len & 0x03)) 3316 return BFA_STATUS_FLASH_BAD_LEN; 3317 3318 if (type == BFA_FLASH_PART_MFG) 3319 return BFA_STATUS_EINVAL; 3320 3321 if (flash->op_busy) 3322 return BFA_STATUS_DEVBUSY; 3323 3324 flash->op_busy = 1; 3325 flash->cbfn = cbfn; 3326 flash->cbarg = cbarg; 3327 flash->type = type; 3328 flash->instance = instance; 3329 flash->residue = len; 3330 flash->offset = 0; 3331 flash->addr_off = offset; 3332 flash->ubuf = buf; 3333 3334 bfa_flash_write_send(flash); 3335 3336 return BFA_STATUS_OK; 3337 } 3338 3339 /** 3340 * bfa_nw_flash_read_part - Read flash partition. 3341 * 3342 * @flash: flash structure 3343 * @type: flash partition type 3344 * @instance: flash partition instance 3345 * @buf: read data buffer 3346 * @len: data buffer length 3347 * @offset: offset relative to the partition starting address 3348 * @cbfn: callback function 3349 * @cbarg: callback argument 3350 * 3351 * Return status. 3352 */ 3353 enum bfa_status 3354 bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance, 3355 void *buf, u32 len, u32 offset, 3356 bfa_cb_flash cbfn, void *cbarg) 3357 { 3358 if (!bfa_nw_ioc_is_operational(flash->ioc)) 3359 return BFA_STATUS_IOC_NON_OP; 3360 3361 /* 3362 * 'len' must be in word (4-byte) boundary 3363 */ 3364 if (!len || (len & 0x03)) 3365 return BFA_STATUS_FLASH_BAD_LEN; 3366 3367 if (flash->op_busy) 3368 return BFA_STATUS_DEVBUSY; 3369 3370 flash->op_busy = 1; 3371 flash->cbfn = cbfn; 3372 flash->cbarg = cbarg; 3373 flash->type = type; 3374 flash->instance = instance; 3375 flash->residue = len; 3376 flash->offset = 0; 3377 flash->addr_off = offset; 3378 flash->ubuf = buf; 3379 3380 bfa_flash_read_send(flash); 3381 3382 return BFA_STATUS_OK; 3383 } 3384