/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"
#include "bfi_reg.h"

BFA_TRC_FILE(HAL, CORE);

/*
 * Message handlers for various modules.
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_fcdiag_intr,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};

/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC */
	NULL,		/* BFI_MC_DIAG */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE */
	NULL,		/* BFI_MC_PORT */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};

void
__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
{
	int tail = trcm->tail;
	struct bfa_trc_s *trc = &trcm->trc[tail];

	if (trcm->stopped)
		return;

	trc->fileno = (u16) fileno;
	trc->line = (u16) line;
	trc->data.u64 = data;
	trc->timestamp = BFA_TRC_TS(trcm);

	trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
	if (trcm->tail == trcm->head)
		trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
}

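/*
 * The trace ring above wraps with "(tail + 1) & (BFA_TRC_MAX - 1)", which
 * is correct only if BFA_TRC_MAX is a power of two; once the ring fills,
 * head is pushed forward so the oldest record is overwritten.  A minimal
 * reader, oldest record first (an illustrative sketch, not driver API):
 *
 *	static void trc_walk(struct bfa_trc_mod_s *trcm)
 *	{
 *		u32 i, idx = trcm->head;
 *
 *		for (i = 0; i < BFA_TRC_MAX; i++) {
 *			struct bfa_trc_s *trc = &trcm->trc[idx];
 *
 *			if (trc->timestamp)	// skip never-written slots
 *				pr_debug("trc %u:%u %llx\n", trc->fileno,
 *					 trc->line, trc->data.u64);
 *			idx = (idx + 1) & (BFA_TRC_MAX - 1);
 *		}
 *	}
 */
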
static void
bfa_com_port_attach(struct bfa_s *bfa)
{
	struct bfa_port_s *port = &bfa->modules.port;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);

	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
}

/*
 * ablk module attach
 */
static void
bfa_com_ablk_attach(struct bfa_s *bfa)
{
	struct bfa_ablk_s *ablk = &bfa->modules.ablk;
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);

	bfa_ablk_attach(ablk, &bfa->ioc);
	bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
}

static void
bfa_com_cee_attach(struct bfa_s *bfa)
{
	struct bfa_cee_s *cee = &bfa->modules.cee;
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);

	cee->trcmod = bfa->trcmod;
	bfa_cee_attach(cee, &bfa->ioc, bfa);
	bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
}

static void
bfa_com_sfp_attach(struct bfa_s *bfa)
{
	struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);

	bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
	bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
}

static void
bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_flash_s *flash = BFA_FLASH(bfa);
	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);

	bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_flash_memclaim(flash, flash_dma->kva_curp,
			   flash_dma->dma_curp, mincfg);
}

static void
bfa_com_diag_attach(struct bfa_s *bfa)
{
	struct bfa_diag_s *diag = BFA_DIAG_MOD(bfa);
	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);

	bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
	bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
}

static void
bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_phy_s *phy = BFA_PHY(bfa);
	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);

	bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
}

static void
bfa_com_fru_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_fru_s *fru = BFA_FRU(bfa);
	struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa);

	bfa_fru_attach(fru, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_fru_memclaim(fru, fru_dma->kva_curp, fru_dma->dma_curp, mincfg);
}

/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
	BFA_IOCFC_ACT_ENABLE	= 4,
};

#define DEF_CFG_NUM_FABRICS	1
#define DEF_CFG_NUM_LPORTS	256
#define DEF_CFG_NUM_CQS		4
#define DEF_CFG_NUM_IOIM_REQS	(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS	128
#define DEF_CFG_NUM_FCXP_REQS	64
#define DEF_CFG_NUM_UF_BUFS	64
#define DEF_CFG_NUM_RPORTS	1024
#define DEF_CFG_NUM_ITNIMS	(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS	256

#define DEF_CFG_NUM_SGPGS	2048
#define DEF_CFG_NUM_REQQ_ELEMS	256
#define DEF_CFG_NUM_RSPQ_ELEMS	64
#define DEF_CFG_NUM_SBOOT_TGTS	16
#define DEF_CFG_NUM_SBOOT_LUNS	16

/*
 * IOCFC state machine definitions/declarations
 */
bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_cfg_done,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, operational,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, dconf_write,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_failed,
		   struct bfa_iocfc_s, enum iocfc_event);

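/*
 * IOCFC state machine at a glance, derived from the handlers below
 * (failure events omitted for brevity; any IOC_FAILED on the init path
 * leads to init_failed, afterwards to failed):
 *
 *	stopped  --INIT/ENABLE-->  initing  --IOC_ENABLED-->  dconf_read
 *	dconf_read  --DCONF_DONE-->  init_cfg_wait  --CFG_DONE-->
 *	init_cfg_done  --START-->  operational
 *	operational  --STOP-->  dconf_write  --DCONF_DONE-->  stopping
 *	stopping  --IOC_DISABLED-->  stopped
 *	disabled  --ENABLE-->  enabling  --IOC_ENABLED-->  cfg_wait
 *	cfg_wait  --CFG_DONE-->  operational
 */
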
/*
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_start_submod(struct bfa_s *bfa);
static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
static void bfa_iocfc_send_cfg(void *bfa_arg);
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);

static void
bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
{
}

static void
bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_INIT:
	case IOCFC_E_ENABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_enable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
		break;

	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;

	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;

	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_dconf_modinit(iocfc->bfa);
}

static void
bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_DCONF_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
		break;

	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;

	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;

	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_iocfc_send_cfg(iocfc->bfa);
}

static void
bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_CFG_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
		break;

	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;

	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;

	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
{
	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
		     bfa_iocfc_init_cb, iocfc->bfa);
}

static void
bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_START:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
		break;
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_fcport_init(iocfc->bfa);
	bfa_iocfc_start_submod(iocfc->bfa);
}

static void
bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_dconf_modexit(iocfc->bfa);
}

static void
bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_DCONF_DONE:
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_disable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_DISABLED:
		bfa_isr_disable(iocfc->bfa);
		bfa_iocfc_disable_submod(iocfc->bfa);
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
			     bfa_iocfc_stop_cb, iocfc->bfa);
		break;

	case IOCFC_E_IOC_ENABLED:
	case IOCFC_E_DCONF_DONE:
	case IOCFC_E_CFG_DONE:
		break;

	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_enable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
		break;

	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;

	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;

	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);

		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_iocfc_send_cfg(iocfc->bfa);
}

static void
bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_CFG_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;

	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_disable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_DISABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
		break;
	case IOCFC_E_IOC_ENABLED:
	case IOCFC_E_DCONF_DONE:
	case IOCFC_E_CFG_DONE:
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	bfa_iocfc_disable_submod(iocfc->bfa);
	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
		     bfa_iocfc_disable_cb, iocfc->bfa);
}

static void
bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_ENABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	bfa_iocfc_disable_submod(iocfc->bfa);
}

static void
bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
		break;
	case IOCFC_E_IOC_FAILED:
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
		     bfa_iocfc_init_cb, iocfc->bfa);
}

static void
bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	case IOCFC_E_DISABLE:
		bfa_ioc_disable(&iocfc->bfa->ioc);
		break;
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
		break;
	case IOCFC_E_IOC_DISABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
			     bfa_iocfc_disable_cb, iocfc->bfa);
		break;
	case IOCFC_E_IOC_FAILED:
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/*
 * BFA Interrupt handling functions
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}

bfa_boolean_t
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;
	bfa_boolean_t ret;

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	ret = (ci != pi);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * acknowledge RME completions and update CI
	 */
	bfa_isr_rspq_ack(bfa, qid, ci);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);

	return ret;
}

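/*
 * Ring protocol, in brief: firmware advances the producer index (PI) as
 * it posts completions; the loop above consumes entries and advances the
 * consumer index (CI) until CI == PI, then writes CI back via
 * bfa_isr_rspq_ack().  CQ_INCR() advances an index modulo the queue
 * depth, i.e. it behaves like this sketch (the real macro lives in the
 * queue headers):
 *
 *	ci = (ci + 1) % bfa->iocfc.cfg.drvcfg.num_rspq_elems;
 */
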
static inline void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	bfa_isr_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return;

	bfa_msix_lpu_err(bfa, intr);
}

bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;
	bfa_boolean_t rspq_comp = BFA_FALSE;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
	if (qintr)
		writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	/*
	 * Unconditional RME completion queue interrupt
	 */
	if (bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			if (bfa_isr_rspq(bfa, queue))
				rspq_comp = BFA_TRUE;
	}

	if (!intr)
		return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	if (bfa->intr_enabled)
		bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}

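/*
 * The return value of bfa_intx() doubles as the "was this our interrupt?"
 * test on the legacy INTx path.  A simplified, hypothetical ISR wrapper
 * (the real one lives in the bfad OS layer):
 *
 *	static irqreturn_t my_intx_isr(int irq, void *dev_id)
 *	{
 *		struct bfa_s *bfa = dev_id;
 *
 *		return bfa_intx(bfa) ? IRQ_HANDLED : IRQ_NONE;
 *	}
 */
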
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int port_id = bfa_ioc_portid(&bfa->ioc);

	bfa_trc(bfa, bfa_ioc_pcifn(&bfa->ioc));
	bfa_trc(bfa, port_id);

	bfa_msix_ctrl_install(bfa);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= port_id == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= port_id == 0 ?
			__HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);

	/*
	 * Set the flag indicating successful enabling of interrupts
	 */
	bfa->intr_enabled = BFA_TRUE;
}

void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa->intr_enabled = BFA_FALSE;
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}

void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}

void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}

void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}

void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				  __HFN_INT_MBOX_LPU1_CT2);
		intr &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
			(intr & __HFN_INT_LL_HALT) : 0;
		pss_isr = intr & __HFN_INT_ERR_PSS;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared so driver's interrupt
			 * handler is still called even though it is already
			 * masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
			       bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}

/*
 * BFA IOC FC related functions
 */

/*
 * BFA IOC private functions
 */

/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->single_msix_vec = 0;
	if (bfa->msix.nvecs == 1)
		cfg_info->single_msix_vec = 1;
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	cfg_info->num_ioim_reqs = cpu_to_be16(bfa_fcpim_get_throttle_cfg(bfa,
						cfg->fwcfg.num_ioim_reqs));
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if this is the driver init path
	 * and not the ioc disable/enable path.
	 */
	if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_fn_lpu(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}

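/*
 * Configuration handshake, as implemented in this file: the mailbox
 * request above carries only the DMA address of the config info page;
 * firmware reads that page, writes its reply into the cfgrsp buffer and
 * answers with BFI_IOCFC_I2H_CFG_REPLY, which bfa_iocfc_isr() routes to
 * bfa_iocfc_cfgrsp().  That handler fixes up endianness, re-configures
 * resources and finally raises IOCFC_E_CFG_DONE into the state machine.
 */
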
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}

static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
	u8 *dm_kva = NULL;
	u64 dm_pa = 0;
	int i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_dma_s *reqq_dma, *rspq_dma;

	/* First allocate dma memory for IOC */
	bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
			  bfa_mem_dma_phys(ioc_dma));

	/* Claim DMA-able memory for the request/response queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
		iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
		iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
		memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);

		rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
		iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
		iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
		memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
	}

	/* Claim IOCFC dma memory - for shadow CI/PI */
	dm_kva = bfa_mem_dma_virt(iocfc_dma);
	dm_pa = bfa_mem_dma_phys(iocfc_dma);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/* Claim IOCFC dma memory - for the config info page */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/* Claim IOCFC dma memory - for the config response */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	/* Claim IOCFC kva memory */
	bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
	bfa_mem_kva_curp(iocfc) += BFA_DBG_FWTRC_LEN;
}

/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int i;

	bfa->queue_process = BFA_TRUE;
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));

	bfa_fcport_start(bfa);
	bfa_uf_start(bfa);
	/*
	 * bfa_init() with flash read is complete. Now invalidate the stale
	 * content of the LUN mask, such as unit attention, rp tag and lp tag.
	 */
	bfa_ioim_lm_init(BFA_FCP_MOD(bfa)->bfa);

	bfa->iocfc.submod_enabled = BFA_TRUE;
}

/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	if (bfa->iocfc.submod_enabled == BFA_FALSE)
		return;

	bfa_fcdiag_iocdisable(bfa);
	bfa_fcport_iocdisable(bfa);
	bfa_fcxp_iocdisable(bfa);
	bfa_lps_iocdisable(bfa);
	bfa_rport_iocdisable(bfa);
	bfa_fcp_iocdisable(bfa);
	bfa_dconf_iocdisable(bfa);

	bfa->iocfc.submod_enabled = BFA_FALSE;
}

static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;

	if (complete)
		bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
}

static void
bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->enable_comp);
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}

/*
 * configure queue registers from firmware response
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int i;
	struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}

static void
bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;

	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
	bfa_fcp_res_recfg(bfa, cpu_to_be16(cfg_info->num_ioim_reqs),
			  fwcfg->num_ioim_reqs);
	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;

	fwcfg->num_cqs = fwcfg->num_cqs;	/* single byte; no swap needed */
	fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
	fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
	fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);

	/*
	 * configure queue register offsets as learnt from firmware
	 */
	bfa_iocfc_qreg(bfa, &cfgrsp->qreg);

	/*
	 * Re-configure resources as learnt from Firmware
	 */
	bfa_iocfc_res_recfg(bfa, fwcfg);

	/*
	 * Install MSIX queue handlers
	 */
	bfa_msix_queue_install(bfa);

	if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
		bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
		bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
		bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
	}
}

void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}

/*
 * Process FAA pwwn msg from fw.
 */
static void
bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
	cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;

	bfa->ioc.attr->pwwn = msg->pwwn;
	bfa->ioc.attr->nwwn = msg->nwwn;
	bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
}

/* Fabric Assigned Address specific functions */

/*
 * Check whether IOC is ready before sending command down
 */
static bfa_status_t
bfa_faa_validate_request(struct bfa_s *bfa)
{
	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
	u32 card_type = bfa->ioc.attr->card_type;

	if (bfa_ioc_is_operational(&bfa->ioc)) {
		if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
	} else {
		return BFA_STATUS_IOC_NON_OP;
	}

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
	      bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_query_s faa_attr_req;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	bfa_status_t status;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	iocfc->faa_args.faa_attr = attr;
	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	iocfc->faa_args.busy = BFA_TRUE;
	memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
	bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
			  sizeof(struct bfi_faa_query_s));

	return BFA_STATUS_OK;
}

/*
 * FAA query response
 */
static void
bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
		    bfi_faa_query_rsp_t *rsp)
{
	void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;

	if (iocfc->faa_args.faa_attr) {
		iocfc->faa_args.faa_attr->faa = rsp->faa;
		iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
		iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
	}

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
	iocfc->faa_args.busy = BFA_FALSE;
}

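/*
 * bfa_faa_query() is asynchronous: the caller's attribute buffer and
 * callback are parked in iocfc->faa_args while the mailbox request is in
 * flight, and bfa_faa_query_reply() above completes them.  A hypothetical
 * caller sketch (my_faa_done() and my_ctx are illustrative names):
 *
 *	static void my_faa_done(void *cbarg, enum bfa_status status) { ... }
 *
 *	if (bfa_faa_query(bfa, &faa_attr, my_faa_done, my_ctx) !=
 *	    BFA_STATUS_OK)
 *		...;	// busy, non-operational IOC, or unsupported card
 */
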
/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s *bfa = bfa_arg;

	if (status == BFA_STATUS_OK)
		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
	else
		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}

/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa->queue_process = BFA_FALSE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
}

/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa->queue_process = BFA_FALSE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}

/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}

/*
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		  struct bfa_s *bfa)
{
	int q, per_reqq_sz, per_rspq_sz;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
	u32 dm_len = 0;

	/* dma memory setup for IOC */
	bfa_mem_dma_setup(meminfo, ioc_dma,
		BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));

	/* dma memory setup for REQ/RSP queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
		bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
				  per_reqq_sz);
		bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
				  per_rspq_sz);
	}

	/* IOCFC dma memory - calculate Shadow CI/PI size */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++)
		dm_len += (2 * BFA_CACHELINE_SZ);

	/* IOCFC dma memory - calculate config info / rsp size */
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);

	/* dma memory setup for IOCFC */
	bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);

	/* kva memory setup for IOCFC */
	bfa_mem_kva_setup(meminfo, iocfc_kva, BFA_DBG_FWTRC_LEN);
}

/*
 * Attach the IOC FC subsystem: register IOC callbacks, claim memory and
 * set the IOCFC state machine to its initial (stopped) state.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);

	bfa->iocfc.cb_reqd = BFA_FALSE;
	bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa->iocfc.submod_enabled = BFA_FALSE;

	bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
}

/*
 * Kick off IOC FC initialization; completion is reported through the
 * bfa_cb_init() callback.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
}

/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
}

/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
}

void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	case BFI_IOCFC_I2H_ADDR_MSG:
		bfa_iocfc_process_faa_addr(bfa,
					   (struct bfi_faa_addr_msg_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
		break;
	default:
		WARN_ON(1);
	}
}

void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
		be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
		be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
		be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
		be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);

	attr->config = iocfc->cfg;
}

bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_fn_lpu(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
	return BFA_STATUS_OK;
}

void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
}

/*
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa->iocfc.cb_reqd = BFA_TRUE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");

	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
}

bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) &&
	       bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational);
}

/*
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}

int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}

/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required of the BFA layer for a given driver configuration.
 *
 * This call will fail if the cap is out of range compared to pre-defined
 * values within the BFA library.
 *
 * @param[in] cfg -	pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *			its configuration in this structure.
 *			The default values for struct bfa_iocfc_cfg_s can be
 *			fetched using bfa_cfg_get_default() API.
 *
 *			If cap's boundary check fails, the library will use
 *			the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *			indicates the memory type (see bfa_mem_type_t) and
 *			amount of memory required.
 *
 *			Driver should allocate the memory, populate the
 *			starting address for each block and provide the same
 *			structure as input parameter to bfa_attach() call.
 *
 * @param[in] bfa -	pointer to the bfa structure, used while fetching the
 *			dma, kva memory information of the bfa sub-modules.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		    struct bfa_s *bfa)
{
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
	struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa);

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));

	/* Initialize the DMA & KVA meminfo queues */
	INIT_LIST_HEAD(&meminfo->dma_info.qe);
	INIT_LIST_HEAD(&meminfo->kva_info.qe);

	bfa_iocfc_meminfo(cfg, meminfo, bfa);
	bfa_sgpg_meminfo(cfg, meminfo, bfa);
	bfa_fcport_meminfo(cfg, meminfo, bfa);
	bfa_fcxp_meminfo(cfg, meminfo, bfa);
	bfa_lps_meminfo(cfg, meminfo, bfa);
	bfa_uf_meminfo(cfg, meminfo, bfa);
	bfa_rport_meminfo(cfg, meminfo, bfa);
	bfa_fcp_meminfo(cfg, meminfo, bfa);
	bfa_dconf_meminfo(cfg, meminfo, bfa);

	/* dma info setup */
	bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
	bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
	bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
	bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
	bfa_mem_dma_setup(meminfo, flash_dma,
			  bfa_flash_meminfo(cfg->drvcfg.min_cfg));
	bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
	bfa_mem_dma_setup(meminfo, phy_dma,
			  bfa_phy_meminfo(cfg->drvcfg.min_cfg));
	bfa_mem_dma_setup(meminfo, fru_dma,
			  bfa_fru_meminfo(cfg->drvcfg.min_cfg));
}

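/*
 * Typical driver-side bring-up sequence implied by the contract above
 * (a sketch: error handling and the per-element DMA/KVA allocation loop
 * are omitted, and my_alloc_meminfo() is a hypothetical helper that walks
 * meminfo->dma_info.qe / kva_info.qe and fills in each element):
 *
 *	struct bfa_iocfc_cfg_s cfg;
 *	struct bfa_meminfo_s meminfo;
 *
 *	bfa_cfg_get_default(&cfg);
 *	bfa_cfg_get_meminfo(&cfg, &meminfo, bfa);
 *	my_alloc_meminfo(&meminfo);
 *	bfa_attach(bfa, bfad, &cfg, &meminfo, pcidev);
 *	bfa_init(bfa);		// kicks off HW init, per the notes below
 */
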
/*
 * Use this function to attach the driver instance to the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in the bfa_init() call).
 *
 * This call will fail if the cap is out of range compared to
 * pre-defined values within the BFA library.
 *
 * @param[out] bfa	Pointer to bfa_t.
 * @param[in] bfad	Opaque handle back to the driver's IOC structure
 * @param[in] cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in] meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in] pcidev	pointer to struct bfa_pcidev_s
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 *
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/* Initialize memory pointers for iterative allocation */
	dma_info = &meminfo->dma_info;
	dma_info->kva_curp = dma_info->kva;
	dma_info->dma_curp = dma_info->dma;

	kva_info = &meminfo->kva_info;
	kva_info->kva_curp = kva_info->kva;

	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva_curp = dma_elem->kva;
		dma_elem->dma_curp = dma_elem->dma;
	}

	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		kva_elem->kva_curp = kva_elem->kva;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, pcidev);
	bfa_fcdiag_attach(bfa, bfad, cfg, pcidev);
	bfa_sgpg_attach(bfa, bfad, cfg, pcidev);
	bfa_fcport_attach(bfa, bfad, cfg, pcidev);
	bfa_fcxp_attach(bfa, bfad, cfg, pcidev);
	bfa_lps_attach(bfa, bfad, cfg, pcidev);
	bfa_uf_attach(bfa, bfad, cfg, pcidev);
	bfa_rport_attach(bfa, bfad, cfg, pcidev);
	bfa_fcp_attach(bfa, bfad, cfg, pcidev);
	bfa_dconf_attach(bfa, bfad, cfg);
	bfa_com_port_attach(bfa);
	bfa_com_ablk_attach(bfa);
	bfa_com_cee_attach(bfa);
	bfa_com_sfp_attach(bfa);
	bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
	bfa_com_diag_attach(bfa);
	bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
	bfa_com_fru_attach(bfa, cfg->drvcfg.min_cfg);
}

/*
 * Use this function to delete a BFA IOC. IOC should be stopped (by
 * calling bfa_stop()) before this function call.
 *
 * @param[in] bfa - pointer to bfa_t.
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 */
void
bfa_detach(struct bfa_s *bfa)
{
	bfa_ioc_detach(&bfa->ioc);
}

void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}

void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct list_head *qen;
	struct bfa_cb_qe_s *hcb_qe;
	bfa_cb_cbfn_status_t cbfn;

	list_for_each_safe(qe, qen, comp_q) {
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		if (hcb_qe->pre_rmv) {
			/* qe is invalid after return, dequeue before cbfn() */
			list_del(qe);
			cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
			cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
		} else
			hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
	}
}

void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct bfa_cb_qe_s *hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		WARN_ON(hcb_qe->pre_rmv);
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}

/*
 * Return the list of PCI vendor/device id lists supported by this
 * BFA instance.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
	static struct bfa_pciid_s __pciids[] = {
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
	};

	*npciids = ARRAY_SIZE(__pciids);
	*pciids = __pciids;
}

/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then turn back and
 * overwrite entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * @return
 * void
 *
 * Special Considerations:
 * @note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;
}

void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}