1 /* 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 3 * All rights reserved 4 * www.brocade.com 5 * 6 * Linux driver for Brocade Fibre Channel Host Bus Adapter. 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License (GPL) Version 2 as 10 * published by the Free Software Foundation 11 * 12 * This program is distributed in the hope that it will be useful, but 13 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * General Public License for more details. 16 */ 17 18 #include "bfad_drv.h" 19 #include "bfa_modules.h" 20 #include "bfi_reg.h" 21 22 BFA_TRC_FILE(HAL, CORE); 23 24 /* 25 * BFA module list terminated by NULL 26 */ 27 static struct bfa_module_s *hal_mods[] = { 28 &hal_mod_fcdiag, 29 &hal_mod_sgpg, 30 &hal_mod_fcport, 31 &hal_mod_fcxp, 32 &hal_mod_lps, 33 &hal_mod_uf, 34 &hal_mod_rport, 35 &hal_mod_fcp, 36 NULL 37 }; 38 39 /* 40 * Message handlers for various modules. 41 */ 42 static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { 43 bfa_isr_unhandled, /* NONE */ 44 bfa_isr_unhandled, /* BFI_MC_IOC */ 45 bfa_fcdiag_intr, /* BFI_MC_DIAG */ 46 bfa_isr_unhandled, /* BFI_MC_FLASH */ 47 bfa_isr_unhandled, /* BFI_MC_CEE */ 48 bfa_fcport_isr, /* BFI_MC_FCPORT */ 49 bfa_isr_unhandled, /* BFI_MC_IOCFC */ 50 bfa_isr_unhandled, /* BFI_MC_LL */ 51 bfa_uf_isr, /* BFI_MC_UF */ 52 bfa_fcxp_isr, /* BFI_MC_FCXP */ 53 bfa_lps_isr, /* BFI_MC_LPS */ 54 bfa_rport_isr, /* BFI_MC_RPORT */ 55 bfa_itn_isr, /* BFI_MC_ITN */ 56 bfa_isr_unhandled, /* BFI_MC_IOIM_READ */ 57 bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */ 58 bfa_isr_unhandled, /* BFI_MC_IOIM_IO */ 59 bfa_ioim_isr, /* BFI_MC_IOIM */ 60 bfa_ioim_good_comp_isr, /* BFI_MC_IOIM_IOCOM */ 61 bfa_tskim_isr, /* BFI_MC_TSKIM */ 62 bfa_isr_unhandled, /* BFI_MC_SBOOT */ 63 bfa_isr_unhandled, /* BFI_MC_IPFC */ 64 bfa_isr_unhandled, /* BFI_MC_PORT */ 65 bfa_isr_unhandled, /* --------- */ 66 bfa_isr_unhandled, /* --------- */ 67 bfa_isr_unhandled, /* --------- */ 68 bfa_isr_unhandled, /* --------- */ 69 bfa_isr_unhandled, /* --------- */ 70 bfa_isr_unhandled, /* --------- */ 71 bfa_isr_unhandled, /* --------- */ 72 bfa_isr_unhandled, /* --------- */ 73 bfa_isr_unhandled, /* --------- */ 74 bfa_isr_unhandled, /* --------- */ 75 }; 76 /* 77 * Message handlers for mailbox command classes 78 */ 79 static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { 80 NULL, 81 NULL, /* BFI_MC_IOC */ 82 NULL, /* BFI_MC_DIAG */ 83 NULL, /* BFI_MC_FLASH */ 84 NULL, /* BFI_MC_CEE */ 85 NULL, /* BFI_MC_PORT */ 86 bfa_iocfc_isr, /* BFI_MC_IOCFC */ 87 NULL, 88 }; 89 90 91 92 static void 93 bfa_com_port_attach(struct bfa_s *bfa) 94 { 95 struct bfa_port_s *port = &bfa->modules.port; 96 struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa); 97 98 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod); 99 bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp); 100 } 101 102 /* 103 * ablk module attach 104 */ 105 static void 106 bfa_com_ablk_attach(struct bfa_s *bfa) 107 { 108 struct bfa_ablk_s *ablk = &bfa->modules.ablk; 109 struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa); 110 111 bfa_ablk_attach(ablk, &bfa->ioc); 112 bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp); 113 } 114 115 static void 116 bfa_com_cee_attach(struct bfa_s *bfa) 117 { 118 struct bfa_cee_s *cee = &bfa->modules.cee; 119 struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa); 120 121 cee->trcmod = bfa->trcmod; 122 bfa_cee_attach(cee, &bfa->ioc, bfa); 123 bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp); 124 } 125 126 static void 127 bfa_com_sfp_attach(struct bfa_s *bfa) 128 { 129 struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa); 130 struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa); 131 132 bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod); 133 bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp); 134 } 135 136 static void 137 bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) 138 { 139 struct bfa_flash_s *flash = BFA_FLASH(bfa); 140 struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa); 141 142 bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg); 143 bfa_flash_memclaim(flash, flash_dma->kva_curp, 144 flash_dma->dma_curp, mincfg); 145 } 146 147 static void 148 bfa_com_diag_attach(struct bfa_s *bfa) 149 { 150 struct bfa_diag_s *diag = BFA_DIAG_MOD(bfa); 151 struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa); 152 153 bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod); 154 bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp); 155 } 156 157 static void 158 bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) 159 { 160 struct bfa_phy_s *phy = BFA_PHY(bfa); 161 struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa); 162 163 bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg); 164 bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg); 165 } 166 167 /* 168 * BFA IOC FC related definitions 169 */ 170 171 /* 172 * IOC local definitions 173 */ 174 #define BFA_IOCFC_TOV 5000 /* msecs */ 175 176 enum { 177 BFA_IOCFC_ACT_NONE = 0, 178 BFA_IOCFC_ACT_INIT = 1, 179 BFA_IOCFC_ACT_STOP = 2, 180 BFA_IOCFC_ACT_DISABLE = 3, 181 BFA_IOCFC_ACT_ENABLE = 4, 182 }; 183 184 #define DEF_CFG_NUM_FABRICS 1 185 #define DEF_CFG_NUM_LPORTS 256 186 #define DEF_CFG_NUM_CQS 4 187 #define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX) 188 #define DEF_CFG_NUM_TSKIM_REQS 128 189 #define DEF_CFG_NUM_FCXP_REQS 64 190 #define DEF_CFG_NUM_UF_BUFS 64 191 #define DEF_CFG_NUM_RPORTS 1024 192 #define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS) 193 #define DEF_CFG_NUM_TINS 256 194 195 #define DEF_CFG_NUM_SGPGS 2048 196 #define DEF_CFG_NUM_REQQ_ELEMS 256 197 #define DEF_CFG_NUM_RSPQ_ELEMS 64 198 #define DEF_CFG_NUM_SBOOT_TGTS 16 199 #define DEF_CFG_NUM_SBOOT_LUNS 16 200 201 /* 202 * forward declaration for IOC FC functions 203 */ 204 static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); 205 static void bfa_iocfc_disable_cbfn(void *bfa_arg); 206 static void bfa_iocfc_hbfail_cbfn(void *bfa_arg); 207 static void bfa_iocfc_reset_cbfn(void *bfa_arg); 208 static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; 209 210 /* 211 * BFA Interrupt handling functions 212 */ 213 static void 214 bfa_reqq_resume(struct bfa_s *bfa, int qid) 215 { 216 struct list_head *waitq, *qe, *qen; 217 struct bfa_reqq_wait_s *wqe; 218 219 waitq = bfa_reqq(bfa, qid); 220 list_for_each_safe(qe, qen, waitq) { 221 /* 222 * Callback only as long as there is room in request queue 223 */ 224 if (bfa_reqq_full(bfa, qid)) 225 break; 226 227 list_del(qe); 228 wqe = (struct bfa_reqq_wait_s *) qe; 229 wqe->qresume(wqe->cbarg); 230 } 231 } 232 233 static inline void 234 bfa_isr_rspq(struct bfa_s *bfa, int qid) 235 { 236 struct bfi_msg_s *m; 237 u32 pi, ci; 238 struct list_head *waitq; 239 240 bfa_isr_rspq_ack(bfa, qid); 241 242 ci = bfa_rspq_ci(bfa, qid); 243 pi = bfa_rspq_pi(bfa, qid); 244 245 while (ci != pi) { 246 m = bfa_rspq_elem(bfa, qid, ci); 247 WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX); 248 249 bfa_isrs[m->mhdr.msg_class] (bfa, m); 250 CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems); 251 } 252 253 /* 254 * update CI 255 */ 256 bfa_rspq_ci(bfa, qid) = pi; 257 writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]); 258 mmiowb(); 259 260 /* 261 * Resume any pending requests in the corresponding reqq. 262 */ 263 waitq = bfa_reqq(bfa, qid); 264 if (!list_empty(waitq)) 265 bfa_reqq_resume(bfa, qid); 266 } 267 268 static inline void 269 bfa_isr_reqq(struct bfa_s *bfa, int qid) 270 { 271 struct list_head *waitq; 272 273 bfa_isr_reqq_ack(bfa, qid); 274 275 /* 276 * Resume any pending requests in the corresponding reqq. 277 */ 278 waitq = bfa_reqq(bfa, qid); 279 if (!list_empty(waitq)) 280 bfa_reqq_resume(bfa, qid); 281 } 282 283 void 284 bfa_msix_all(struct bfa_s *bfa, int vec) 285 { 286 u32 intr, qintr; 287 int queue; 288 289 intr = readl(bfa->iocfc.bfa_regs.intr_status); 290 if (!intr) 291 return; 292 293 /* 294 * RME completion queue interrupt 295 */ 296 qintr = intr & __HFN_INT_RME_MASK; 297 if (qintr && bfa->queue_process) { 298 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) 299 bfa_isr_rspq(bfa, queue); 300 } 301 302 intr &= ~qintr; 303 if (!intr) 304 return; 305 306 /* 307 * CPE completion queue interrupt 308 */ 309 qintr = intr & __HFN_INT_CPE_MASK; 310 if (qintr && bfa->queue_process) { 311 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) 312 bfa_isr_reqq(bfa, queue); 313 } 314 intr &= ~qintr; 315 if (!intr) 316 return; 317 318 bfa_msix_lpu_err(bfa, intr); 319 } 320 321 bfa_boolean_t 322 bfa_intx(struct bfa_s *bfa) 323 { 324 u32 intr, qintr; 325 int queue; 326 327 intr = readl(bfa->iocfc.bfa_regs.intr_status); 328 if (!intr) 329 return BFA_FALSE; 330 331 qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK); 332 if (qintr) 333 writel(qintr, bfa->iocfc.bfa_regs.intr_status); 334 335 /* 336 * RME completion queue interrupt 337 */ 338 qintr = intr & __HFN_INT_RME_MASK; 339 if (qintr && bfa->queue_process) { 340 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) 341 bfa_isr_rspq(bfa, queue); 342 } 343 344 intr &= ~qintr; 345 if (!intr) 346 return BFA_TRUE; 347 348 /* 349 * CPE completion queue interrupt 350 */ 351 qintr = intr & __HFN_INT_CPE_MASK; 352 if (qintr && bfa->queue_process) { 353 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) 354 bfa_isr_reqq(bfa, queue); 355 } 356 intr &= ~qintr; 357 if (!intr) 358 return BFA_TRUE; 359 360 bfa_msix_lpu_err(bfa, intr); 361 362 return BFA_TRUE; 363 } 364 365 void 366 bfa_isr_enable(struct bfa_s *bfa) 367 { 368 u32 umsk; 369 int pci_func = bfa_ioc_pcifn(&bfa->ioc); 370 371 bfa_trc(bfa, pci_func); 372 373 bfa_msix_ctrl_install(bfa); 374 375 if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) { 376 umsk = __HFN_INT_ERR_MASK_CT2; 377 umsk |= pci_func == 0 ? 378 __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2; 379 } else { 380 umsk = __HFN_INT_ERR_MASK; 381 umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK; 382 } 383 384 writel(umsk, bfa->iocfc.bfa_regs.intr_status); 385 writel(~umsk, bfa->iocfc.bfa_regs.intr_mask); 386 bfa->iocfc.intr_mask = ~umsk; 387 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0); 388 } 389 390 void 391 bfa_isr_disable(struct bfa_s *bfa) 392 { 393 bfa_isr_mode_set(bfa, BFA_FALSE); 394 writel(-1L, bfa->iocfc.bfa_regs.intr_mask); 395 bfa_msix_uninstall(bfa); 396 } 397 398 void 399 bfa_msix_reqq(struct bfa_s *bfa, int vec) 400 { 401 bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0); 402 } 403 404 void 405 bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m) 406 { 407 bfa_trc(bfa, m->mhdr.msg_class); 408 bfa_trc(bfa, m->mhdr.msg_id); 409 bfa_trc(bfa, m->mhdr.mtag.i2htok); 410 WARN_ON(1); 411 bfa_trc_stop(bfa->trcmod); 412 } 413 414 void 415 bfa_msix_rspq(struct bfa_s *bfa, int vec) 416 { 417 bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0); 418 } 419 420 void 421 bfa_msix_lpu_err(struct bfa_s *bfa, int vec) 422 { 423 u32 intr, curr_value; 424 bfa_boolean_t lpu_isr, halt_isr, pss_isr; 425 426 intr = readl(bfa->iocfc.bfa_regs.intr_status); 427 428 if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) { 429 halt_isr = intr & __HFN_INT_CPQ_HALT_CT2; 430 pss_isr = intr & __HFN_INT_ERR_PSS_CT2; 431 lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 | 432 __HFN_INT_MBOX_LPU1_CT2); 433 intr &= __HFN_INT_ERR_MASK_CT2; 434 } else { 435 halt_isr = intr & __HFN_INT_LL_HALT; 436 pss_isr = intr & __HFN_INT_ERR_PSS; 437 lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1); 438 intr &= __HFN_INT_ERR_MASK; 439 } 440 441 if (lpu_isr) 442 bfa_ioc_mbox_isr(&bfa->ioc); 443 444 if (intr) { 445 if (halt_isr) { 446 /* 447 * If LL_HALT bit is set then FW Init Halt LL Port 448 * Register needs to be cleared as well so Interrupt 449 * Status Register will be cleared. 450 */ 451 curr_value = readl(bfa->ioc.ioc_regs.ll_halt); 452 curr_value &= ~__FW_INIT_HALT_P; 453 writel(curr_value, bfa->ioc.ioc_regs.ll_halt); 454 } 455 456 if (pss_isr) { 457 /* 458 * ERR_PSS bit needs to be cleared as well in case 459 * interrups are shared so driver's interrupt handler is 460 * still called even though it is already masked out. 461 */ 462 curr_value = readl( 463 bfa->ioc.ioc_regs.pss_err_status_reg); 464 writel(curr_value, 465 bfa->ioc.ioc_regs.pss_err_status_reg); 466 } 467 468 writel(intr, bfa->iocfc.bfa_regs.intr_status); 469 bfa_ioc_error_isr(&bfa->ioc); 470 } 471 } 472 473 /* 474 * BFA IOC FC related functions 475 */ 476 477 /* 478 * BFA IOC private functions 479 */ 480 481 /* 482 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ 483 */ 484 static void 485 bfa_iocfc_send_cfg(void *bfa_arg) 486 { 487 struct bfa_s *bfa = bfa_arg; 488 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 489 struct bfi_iocfc_cfg_req_s cfg_req; 490 struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo; 491 struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg; 492 int i; 493 494 WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS); 495 bfa_trc(bfa, cfg->fwcfg.num_cqs); 496 497 bfa_iocfc_reset_queues(bfa); 498 499 /* 500 * initialize IOC configuration info 501 */ 502 cfg_info->single_msix_vec = 0; 503 if (bfa->msix.nvecs == 1) 504 cfg_info->single_msix_vec = 1; 505 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; 506 cfg_info->num_cqs = cfg->fwcfg.num_cqs; 507 cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs); 508 cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs); 509 510 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); 511 /* 512 * dma map REQ and RSP circular queues and shadow pointers 513 */ 514 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { 515 bfa_dma_be_addr_set(cfg_info->req_cq_ba[i], 516 iocfc->req_cq_ba[i].pa); 517 bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i], 518 iocfc->req_cq_shadow_ci[i].pa); 519 cfg_info->req_cq_elems[i] = 520 cpu_to_be16(cfg->drvcfg.num_reqq_elems); 521 522 bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i], 523 iocfc->rsp_cq_ba[i].pa); 524 bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i], 525 iocfc->rsp_cq_shadow_pi[i].pa); 526 cfg_info->rsp_cq_elems[i] = 527 cpu_to_be16(cfg->drvcfg.num_rspq_elems); 528 } 529 530 /* 531 * Enable interrupt coalescing if it is driver init path 532 * and not ioc disable/enable path. 533 */ 534 if (!iocfc->cfgdone) 535 cfg_info->intr_attr.coalesce = BFA_TRUE; 536 537 iocfc->cfgdone = BFA_FALSE; 538 539 /* 540 * dma map IOC configuration itself 541 */ 542 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, 543 bfa_fn_lpu(bfa)); 544 bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa); 545 546 bfa_ioc_mbox_send(&bfa->ioc, &cfg_req, 547 sizeof(struct bfi_iocfc_cfg_req_s)); 548 } 549 550 static void 551 bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 552 struct bfa_pcidev_s *pcidev) 553 { 554 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 555 556 bfa->bfad = bfad; 557 iocfc->bfa = bfa; 558 iocfc->action = BFA_IOCFC_ACT_NONE; 559 560 iocfc->cfg = *cfg; 561 562 /* 563 * Initialize chip specific handlers. 564 */ 565 if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) { 566 iocfc->hwif.hw_reginit = bfa_hwct_reginit; 567 iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack; 568 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack; 569 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init; 570 iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install; 571 iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install; 572 iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall; 573 iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set; 574 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs; 575 iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range; 576 iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT; 577 iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT; 578 } else { 579 iocfc->hwif.hw_reginit = bfa_hwcb_reginit; 580 iocfc->hwif.hw_reqq_ack = NULL; 581 iocfc->hwif.hw_rspq_ack = NULL; 582 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; 583 iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install; 584 iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install; 585 iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall; 586 iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set; 587 iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs; 588 iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range; 589 iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB + 590 bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS; 591 iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB + 592 bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS; 593 } 594 595 if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) { 596 iocfc->hwif.hw_reginit = bfa_hwct2_reginit; 597 iocfc->hwif.hw_isr_mode_set = NULL; 598 iocfc->hwif.hw_rspq_ack = NULL; 599 } 600 601 iocfc->hwif.hw_reginit(bfa); 602 bfa->msix.nvecs = 0; 603 } 604 605 static void 606 bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg) 607 { 608 u8 *dm_kva = NULL; 609 u64 dm_pa = 0; 610 int i, per_reqq_sz, per_rspq_sz, dbgsz; 611 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 612 struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa); 613 struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa); 614 struct bfa_mem_dma_s *reqq_dma, *rspq_dma; 615 616 /* First allocate dma memory for IOC */ 617 bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma), 618 bfa_mem_dma_phys(ioc_dma)); 619 620 /* Claim DMA-able memory for the request/response queues */ 621 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), 622 BFA_DMA_ALIGN_SZ); 623 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), 624 BFA_DMA_ALIGN_SZ); 625 626 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { 627 reqq_dma = BFA_MEM_REQQ_DMA(bfa, i); 628 iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma); 629 iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma); 630 memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz); 631 632 rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i); 633 iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma); 634 iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma); 635 memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz); 636 } 637 638 /* Claim IOCFC dma memory - for shadow CI/PI */ 639 dm_kva = bfa_mem_dma_virt(iocfc_dma); 640 dm_pa = bfa_mem_dma_phys(iocfc_dma); 641 642 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { 643 iocfc->req_cq_shadow_ci[i].kva = dm_kva; 644 iocfc->req_cq_shadow_ci[i].pa = dm_pa; 645 dm_kva += BFA_CACHELINE_SZ; 646 dm_pa += BFA_CACHELINE_SZ; 647 648 iocfc->rsp_cq_shadow_pi[i].kva = dm_kva; 649 iocfc->rsp_cq_shadow_pi[i].pa = dm_pa; 650 dm_kva += BFA_CACHELINE_SZ; 651 dm_pa += BFA_CACHELINE_SZ; 652 } 653 654 /* Claim IOCFC dma memory - for the config info page */ 655 bfa->iocfc.cfg_info.kva = dm_kva; 656 bfa->iocfc.cfg_info.pa = dm_pa; 657 bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva; 658 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); 659 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); 660 661 /* Claim IOCFC dma memory - for the config response */ 662 bfa->iocfc.cfgrsp_dma.kva = dm_kva; 663 bfa->iocfc.cfgrsp_dma.pa = dm_pa; 664 bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva; 665 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), 666 BFA_CACHELINE_SZ); 667 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), 668 BFA_CACHELINE_SZ); 669 670 /* Claim IOCFC kva memory */ 671 dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0; 672 if (dbgsz > 0) { 673 bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc)); 674 bfa_mem_kva_curp(iocfc) += dbgsz; 675 } 676 } 677 678 /* 679 * Start BFA submodules. 680 */ 681 static void 682 bfa_iocfc_start_submod(struct bfa_s *bfa) 683 { 684 int i; 685 686 bfa->queue_process = BFA_TRUE; 687 for (i = 0; i < BFI_IOC_MAX_CQS; i++) 688 bfa_isr_rspq_ack(bfa, i); 689 690 for (i = 0; hal_mods[i]; i++) 691 hal_mods[i]->start(bfa); 692 } 693 694 /* 695 * Disable BFA submodules. 696 */ 697 static void 698 bfa_iocfc_disable_submod(struct bfa_s *bfa) 699 { 700 int i; 701 702 for (i = 0; hal_mods[i]; i++) 703 hal_mods[i]->iocdisable(bfa); 704 } 705 706 static void 707 bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete) 708 { 709 struct bfa_s *bfa = bfa_arg; 710 711 if (complete) { 712 if (bfa->iocfc.cfgdone) 713 bfa_cb_init(bfa->bfad, BFA_STATUS_OK); 714 else 715 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); 716 } else { 717 if (bfa->iocfc.cfgdone) 718 bfa->iocfc.action = BFA_IOCFC_ACT_NONE; 719 } 720 } 721 722 static void 723 bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl) 724 { 725 struct bfa_s *bfa = bfa_arg; 726 struct bfad_s *bfad = bfa->bfad; 727 728 if (compl) 729 complete(&bfad->comp); 730 else 731 bfa->iocfc.action = BFA_IOCFC_ACT_NONE; 732 } 733 734 static void 735 bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl) 736 { 737 struct bfa_s *bfa = bfa_arg; 738 struct bfad_s *bfad = bfa->bfad; 739 740 if (compl) 741 complete(&bfad->enable_comp); 742 } 743 744 static void 745 bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl) 746 { 747 struct bfa_s *bfa = bfa_arg; 748 struct bfad_s *bfad = bfa->bfad; 749 750 if (compl) 751 complete(&bfad->disable_comp); 752 } 753 754 /** 755 * configure queue registers from firmware response 756 */ 757 static void 758 bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg) 759 { 760 int i; 761 struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs; 762 void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); 763 764 for (i = 0; i < BFI_IOC_MAX_CQS; i++) { 765 bfa->iocfc.hw_qid[i] = qreg->hw_qid[i]; 766 r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]); 767 r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]); 768 r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]); 769 r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]); 770 r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]); 771 r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]); 772 } 773 } 774 775 static void 776 bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg) 777 { 778 bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs); 779 bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs); 780 bfa_rport_res_recfg(bfa, fwcfg->num_rports); 781 bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs); 782 bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs); 783 } 784 785 /* 786 * Update BFA configuration from firmware configuration. 787 */ 788 static void 789 bfa_iocfc_cfgrsp(struct bfa_s *bfa) 790 { 791 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 792 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; 793 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg; 794 795 fwcfg->num_cqs = fwcfg->num_cqs; 796 fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs); 797 fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs); 798 fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs); 799 fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs); 800 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs); 801 fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports); 802 803 iocfc->cfgdone = BFA_TRUE; 804 805 /* 806 * configure queue register offsets as learnt from firmware 807 */ 808 bfa_iocfc_qreg(bfa, &cfgrsp->qreg); 809 810 /* 811 * Re-configure resources as learnt from Firmware 812 */ 813 bfa_iocfc_res_recfg(bfa, fwcfg); 814 815 /* 816 * Install MSIX queue handlers 817 */ 818 bfa_msix_queue_install(bfa); 819 820 /* 821 * Configuration is complete - initialize/start submodules 822 */ 823 bfa_fcport_init(bfa); 824 825 if (iocfc->action == BFA_IOCFC_ACT_INIT) 826 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa); 827 else { 828 if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE) 829 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe, 830 bfa_iocfc_enable_cb, bfa); 831 bfa_iocfc_start_submod(bfa); 832 } 833 } 834 void 835 bfa_iocfc_reset_queues(struct bfa_s *bfa) 836 { 837 int q; 838 839 for (q = 0; q < BFI_IOC_MAX_CQS; q++) { 840 bfa_reqq_ci(bfa, q) = 0; 841 bfa_reqq_pi(bfa, q) = 0; 842 bfa_rspq_ci(bfa, q) = 0; 843 bfa_rspq_pi(bfa, q) = 0; 844 } 845 } 846 847 /* Fabric Assigned Address specific functions */ 848 849 /* 850 * Check whether IOC is ready before sending command down 851 */ 852 static bfa_status_t 853 bfa_faa_validate_request(struct bfa_s *bfa) 854 { 855 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa); 856 u32 card_type = bfa->ioc.attr->card_type; 857 858 if (bfa_ioc_is_operational(&bfa->ioc)) { 859 if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type)) 860 return BFA_STATUS_FEATURE_NOT_SUPPORTED; 861 } else { 862 if (!bfa_ioc_is_acq_addr(&bfa->ioc)) 863 return BFA_STATUS_IOC_NON_OP; 864 } 865 866 return BFA_STATUS_OK; 867 } 868 869 bfa_status_t 870 bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg) 871 { 872 struct bfi_faa_en_dis_s faa_enable_req; 873 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 874 bfa_status_t status; 875 876 iocfc->faa_args.faa_cb.faa_cbfn = cbfn; 877 iocfc->faa_args.faa_cb.faa_cbarg = cbarg; 878 879 status = bfa_faa_validate_request(bfa); 880 if (status != BFA_STATUS_OK) 881 return status; 882 883 if (iocfc->faa_args.busy == BFA_TRUE) 884 return BFA_STATUS_DEVBUSY; 885 886 if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED) 887 return BFA_STATUS_FAA_ENABLED; 888 889 if (bfa_fcport_is_trunk_enabled(bfa)) 890 return BFA_STATUS_ERROR_TRUNK_ENABLED; 891 892 bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED); 893 iocfc->faa_args.busy = BFA_TRUE; 894 895 memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s)); 896 bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC, 897 BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa)); 898 899 bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req, 900 sizeof(struct bfi_faa_en_dis_s)); 901 902 return BFA_STATUS_OK; 903 } 904 905 bfa_status_t 906 bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, 907 void *cbarg) 908 { 909 struct bfi_faa_en_dis_s faa_disable_req; 910 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 911 bfa_status_t status; 912 913 iocfc->faa_args.faa_cb.faa_cbfn = cbfn; 914 iocfc->faa_args.faa_cb.faa_cbarg = cbarg; 915 916 status = bfa_faa_validate_request(bfa); 917 if (status != BFA_STATUS_OK) 918 return status; 919 920 if (iocfc->faa_args.busy == BFA_TRUE) 921 return BFA_STATUS_DEVBUSY; 922 923 if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED) 924 return BFA_STATUS_FAA_DISABLED; 925 926 bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED); 927 iocfc->faa_args.busy = BFA_TRUE; 928 929 memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s)); 930 bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC, 931 BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa)); 932 933 bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req, 934 sizeof(struct bfi_faa_en_dis_s)); 935 936 return BFA_STATUS_OK; 937 } 938 939 bfa_status_t 940 bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, 941 bfa_cb_iocfc_t cbfn, void *cbarg) 942 { 943 struct bfi_faa_query_s faa_attr_req; 944 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 945 bfa_status_t status; 946 947 iocfc->faa_args.faa_attr = attr; 948 iocfc->faa_args.faa_cb.faa_cbfn = cbfn; 949 iocfc->faa_args.faa_cb.faa_cbarg = cbarg; 950 951 status = bfa_faa_validate_request(bfa); 952 if (status != BFA_STATUS_OK) 953 return status; 954 955 if (iocfc->faa_args.busy == BFA_TRUE) 956 return BFA_STATUS_DEVBUSY; 957 958 iocfc->faa_args.busy = BFA_TRUE; 959 memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s)); 960 bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC, 961 BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa)); 962 963 bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req, 964 sizeof(struct bfi_faa_query_s)); 965 966 return BFA_STATUS_OK; 967 } 968 969 /* 970 * FAA enable response 971 */ 972 static void 973 bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc, 974 struct bfi_faa_en_dis_rsp_s *rsp) 975 { 976 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg; 977 bfa_status_t status = rsp->status; 978 979 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn); 980 981 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status); 982 iocfc->faa_args.busy = BFA_FALSE; 983 } 984 985 /* 986 * FAA disable response 987 */ 988 static void 989 bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc, 990 struct bfi_faa_en_dis_rsp_s *rsp) 991 { 992 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg; 993 bfa_status_t status = rsp->status; 994 995 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn); 996 997 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status); 998 iocfc->faa_args.busy = BFA_FALSE; 999 } 1000 1001 /* 1002 * FAA query response 1003 */ 1004 static void 1005 bfa_faa_query_reply(struct bfa_iocfc_s *iocfc, 1006 bfi_faa_query_rsp_t *rsp) 1007 { 1008 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg; 1009 1010 if (iocfc->faa_args.faa_attr) { 1011 iocfc->faa_args.faa_attr->faa = rsp->faa; 1012 iocfc->faa_args.faa_attr->faa_state = rsp->faa_status; 1013 iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source; 1014 } 1015 1016 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn); 1017 1018 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK); 1019 iocfc->faa_args.busy = BFA_FALSE; 1020 } 1021 1022 /* 1023 * IOC enable request is complete 1024 */ 1025 static void 1026 bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status) 1027 { 1028 struct bfa_s *bfa = bfa_arg; 1029 1030 if (status == BFA_STATUS_FAA_ACQ_ADDR) { 1031 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, 1032 bfa_iocfc_init_cb, bfa); 1033 return; 1034 } 1035 1036 if (status != BFA_STATUS_OK) { 1037 bfa_isr_disable(bfa); 1038 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) 1039 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, 1040 bfa_iocfc_init_cb, bfa); 1041 else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE) 1042 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe, 1043 bfa_iocfc_enable_cb, bfa); 1044 return; 1045 } 1046 1047 bfa_iocfc_send_cfg(bfa); 1048 } 1049 1050 /* 1051 * IOC disable request is complete 1052 */ 1053 static void 1054 bfa_iocfc_disable_cbfn(void *bfa_arg) 1055 { 1056 struct bfa_s *bfa = bfa_arg; 1057 1058 bfa_isr_disable(bfa); 1059 bfa_iocfc_disable_submod(bfa); 1060 1061 if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP) 1062 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb, 1063 bfa); 1064 else { 1065 WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE); 1066 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb, 1067 bfa); 1068 } 1069 } 1070 1071 /* 1072 * Notify sub-modules of hardware failure. 1073 */ 1074 static void 1075 bfa_iocfc_hbfail_cbfn(void *bfa_arg) 1076 { 1077 struct bfa_s *bfa = bfa_arg; 1078 1079 bfa->queue_process = BFA_FALSE; 1080 1081 bfa_isr_disable(bfa); 1082 bfa_iocfc_disable_submod(bfa); 1083 1084 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) 1085 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb, 1086 bfa); 1087 } 1088 1089 /* 1090 * Actions on chip-reset completion. 1091 */ 1092 static void 1093 bfa_iocfc_reset_cbfn(void *bfa_arg) 1094 { 1095 struct bfa_s *bfa = bfa_arg; 1096 1097 bfa_iocfc_reset_queues(bfa); 1098 bfa_isr_enable(bfa); 1099 } 1100 1101 1102 /* 1103 * Query IOC memory requirement information. 1104 */ 1105 void 1106 bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, 1107 struct bfa_s *bfa) 1108 { 1109 int q, per_reqq_sz, per_rspq_sz; 1110 struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa); 1111 struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa); 1112 struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa); 1113 u32 dm_len = 0; 1114 1115 /* dma memory setup for IOC */ 1116 bfa_mem_dma_setup(meminfo, ioc_dma, 1117 BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ)); 1118 1119 /* dma memory setup for REQ/RSP queues */ 1120 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), 1121 BFA_DMA_ALIGN_SZ); 1122 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), 1123 BFA_DMA_ALIGN_SZ); 1124 1125 for (q = 0; q < cfg->fwcfg.num_cqs; q++) { 1126 bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q), 1127 per_reqq_sz); 1128 bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q), 1129 per_rspq_sz); 1130 } 1131 1132 /* IOCFC dma memory - calculate Shadow CI/PI size */ 1133 for (q = 0; q < cfg->fwcfg.num_cqs; q++) 1134 dm_len += (2 * BFA_CACHELINE_SZ); 1135 1136 /* IOCFC dma memory - calculate config info / rsp size */ 1137 dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); 1138 dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), 1139 BFA_CACHELINE_SZ); 1140 1141 /* dma memory setup for IOCFC */ 1142 bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len); 1143 1144 /* kva memory setup for IOCFC */ 1145 bfa_mem_kva_setup(meminfo, iocfc_kva, 1146 ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0)); 1147 } 1148 1149 /* 1150 * Query IOC memory requirement information. 1151 */ 1152 void 1153 bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 1154 struct bfa_pcidev_s *pcidev) 1155 { 1156 int i; 1157 struct bfa_ioc_s *ioc = &bfa->ioc; 1158 1159 bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn; 1160 bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn; 1161 bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn; 1162 bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn; 1163 1164 ioc->trcmod = bfa->trcmod; 1165 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod); 1166 1167 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC); 1168 bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs); 1169 1170 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); 1171 bfa_iocfc_mem_claim(bfa, cfg); 1172 INIT_LIST_HEAD(&bfa->timer_mod.timer_q); 1173 1174 INIT_LIST_HEAD(&bfa->comp_q); 1175 for (i = 0; i < BFI_IOC_MAX_CQS; i++) 1176 INIT_LIST_HEAD(&bfa->reqq_waitq[i]); 1177 } 1178 1179 /* 1180 * Query IOC memory requirement information. 1181 */ 1182 void 1183 bfa_iocfc_init(struct bfa_s *bfa) 1184 { 1185 bfa->iocfc.action = BFA_IOCFC_ACT_INIT; 1186 bfa_ioc_enable(&bfa->ioc); 1187 } 1188 1189 /* 1190 * IOC start called from bfa_start(). Called to start IOC operations 1191 * at driver instantiation for this instance. 1192 */ 1193 void 1194 bfa_iocfc_start(struct bfa_s *bfa) 1195 { 1196 if (bfa->iocfc.cfgdone) 1197 bfa_iocfc_start_submod(bfa); 1198 } 1199 1200 /* 1201 * IOC stop called from bfa_stop(). Called only when driver is unloaded 1202 * for this instance. 1203 */ 1204 void 1205 bfa_iocfc_stop(struct bfa_s *bfa) 1206 { 1207 bfa->iocfc.action = BFA_IOCFC_ACT_STOP; 1208 1209 bfa->queue_process = BFA_FALSE; 1210 bfa_ioc_disable(&bfa->ioc); 1211 } 1212 1213 void 1214 bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m) 1215 { 1216 struct bfa_s *bfa = bfaarg; 1217 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 1218 union bfi_iocfc_i2h_msg_u *msg; 1219 1220 msg = (union bfi_iocfc_i2h_msg_u *) m; 1221 bfa_trc(bfa, msg->mh.msg_id); 1222 1223 switch (msg->mh.msg_id) { 1224 case BFI_IOCFC_I2H_CFG_REPLY: 1225 bfa_iocfc_cfgrsp(bfa); 1226 break; 1227 case BFI_IOCFC_I2H_UPDATEQ_RSP: 1228 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); 1229 break; 1230 case BFI_IOCFC_I2H_FAA_ENABLE_RSP: 1231 bfa_faa_enable_reply(iocfc, 1232 (struct bfi_faa_en_dis_rsp_s *)msg); 1233 break; 1234 case BFI_IOCFC_I2H_FAA_DISABLE_RSP: 1235 bfa_faa_disable_reply(iocfc, 1236 (struct bfi_faa_en_dis_rsp_s *)msg); 1237 break; 1238 case BFI_IOCFC_I2H_FAA_QUERY_RSP: 1239 bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg); 1240 break; 1241 default: 1242 WARN_ON(1); 1243 } 1244 } 1245 1246 void 1247 bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr) 1248 { 1249 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 1250 1251 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce; 1252 1253 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ? 1254 be16_to_cpu(iocfc->cfginfo->intr_attr.delay) : 1255 be16_to_cpu(iocfc->cfgrsp->intr_attr.delay); 1256 1257 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ? 1258 be16_to_cpu(iocfc->cfginfo->intr_attr.latency) : 1259 be16_to_cpu(iocfc->cfgrsp->intr_attr.latency); 1260 1261 attr->config = iocfc->cfg; 1262 } 1263 1264 bfa_status_t 1265 bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr) 1266 { 1267 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 1268 struct bfi_iocfc_set_intr_req_s *m; 1269 1270 iocfc->cfginfo->intr_attr.coalesce = attr->coalesce; 1271 iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay); 1272 iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency); 1273 1274 if (!bfa_iocfc_is_operational(bfa)) 1275 return BFA_STATUS_OK; 1276 1277 m = bfa_reqq_next(bfa, BFA_REQQ_IOC); 1278 if (!m) 1279 return BFA_STATUS_DEVBUSY; 1280 1281 bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ, 1282 bfa_fn_lpu(bfa)); 1283 m->coalesce = iocfc->cfginfo->intr_attr.coalesce; 1284 m->delay = iocfc->cfginfo->intr_attr.delay; 1285 m->latency = iocfc->cfginfo->intr_attr.latency; 1286 1287 bfa_trc(bfa, attr->delay); 1288 bfa_trc(bfa, attr->latency); 1289 1290 bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh); 1291 return BFA_STATUS_OK; 1292 } 1293 1294 void 1295 bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa) 1296 { 1297 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 1298 1299 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); 1300 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa); 1301 } 1302 /* 1303 * Enable IOC after it is disabled. 1304 */ 1305 void 1306 bfa_iocfc_enable(struct bfa_s *bfa) 1307 { 1308 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, 1309 "IOC Enable"); 1310 bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE; 1311 bfa_ioc_enable(&bfa->ioc); 1312 } 1313 1314 void 1315 bfa_iocfc_disable(struct bfa_s *bfa) 1316 { 1317 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, 1318 "IOC Disable"); 1319 bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE; 1320 1321 bfa->queue_process = BFA_FALSE; 1322 bfa_ioc_disable(&bfa->ioc); 1323 } 1324 1325 1326 bfa_boolean_t 1327 bfa_iocfc_is_operational(struct bfa_s *bfa) 1328 { 1329 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; 1330 } 1331 1332 /* 1333 * Return boot target port wwns -- read from boot information in flash. 1334 */ 1335 void 1336 bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns) 1337 { 1338 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 1339 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; 1340 int i; 1341 1342 if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) { 1343 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns); 1344 *nwwns = cfgrsp->pbc_cfg.nbluns; 1345 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++) 1346 wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn; 1347 1348 return; 1349 } 1350 1351 *nwwns = cfgrsp->bootwwns.nwwns; 1352 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn)); 1353 } 1354 1355 int 1356 bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport) 1357 { 1358 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 1359 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; 1360 1361 memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport)); 1362 return cfgrsp->pbc_cfg.nvports; 1363 } 1364 1365 1366 /* 1367 * Use this function query the memory requirement of the BFA library. 1368 * This function needs to be called before bfa_attach() to get the 1369 * memory required of the BFA layer for a given driver configuration. 1370 * 1371 * This call will fail, if the cap is out of range compared to pre-defined 1372 * values within the BFA library 1373 * 1374 * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate 1375 * its configuration in this structure. 1376 * The default values for struct bfa_iocfc_cfg_s can be 1377 * fetched using bfa_cfg_get_default() API. 1378 * 1379 * If cap's boundary check fails, the library will use 1380 * the default bfa_cap_t values (and log a warning msg). 1381 * 1382 * @param[out] meminfo - pointer to bfa_meminfo_t. This content 1383 * indicates the memory type (see bfa_mem_type_t) and 1384 * amount of memory required. 1385 * 1386 * Driver should allocate the memory, populate the 1387 * starting address for each block and provide the same 1388 * structure as input parameter to bfa_attach() call. 1389 * 1390 * @param[in] bfa - pointer to the bfa structure, used while fetching the 1391 * dma, kva memory information of the bfa sub-modules. 1392 * 1393 * @return void 1394 * 1395 * Special Considerations: @note 1396 */ 1397 void 1398 bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, 1399 struct bfa_s *bfa) 1400 { 1401 int i; 1402 struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa); 1403 struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa); 1404 struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa); 1405 struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa); 1406 struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa); 1407 struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa); 1408 struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa); 1409 1410 WARN_ON((cfg == NULL) || (meminfo == NULL)); 1411 1412 memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s)); 1413 1414 /* Initialize the DMA & KVA meminfo queues */ 1415 INIT_LIST_HEAD(&meminfo->dma_info.qe); 1416 INIT_LIST_HEAD(&meminfo->kva_info.qe); 1417 1418 bfa_iocfc_meminfo(cfg, meminfo, bfa); 1419 1420 for (i = 0; hal_mods[i]; i++) 1421 hal_mods[i]->meminfo(cfg, meminfo, bfa); 1422 1423 /* dma info setup */ 1424 bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo()); 1425 bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo()); 1426 bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo()); 1427 bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo()); 1428 bfa_mem_dma_setup(meminfo, flash_dma, 1429 bfa_flash_meminfo(cfg->drvcfg.min_cfg)); 1430 bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo()); 1431 bfa_mem_dma_setup(meminfo, phy_dma, 1432 bfa_phy_meminfo(cfg->drvcfg.min_cfg)); 1433 } 1434 1435 /* 1436 * Use this function to do attach the driver instance with the BFA 1437 * library. This function will not trigger any HW initialization 1438 * process (which will be done in bfa_init() call) 1439 * 1440 * This call will fail, if the cap is out of range compared to 1441 * pre-defined values within the BFA library 1442 * 1443 * @param[out] bfa Pointer to bfa_t. 1444 * @param[in] bfad Opaque handle back to the driver's IOC structure 1445 * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure 1446 * that was used in bfa_cfg_get_meminfo(). 1447 * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should 1448 * use the bfa_cfg_get_meminfo() call to 1449 * find the memory blocks required, allocate the 1450 * required memory and provide the starting addresses. 1451 * @param[in] pcidev pointer to struct bfa_pcidev_s 1452 * 1453 * @return 1454 * void 1455 * 1456 * Special Considerations: 1457 * 1458 * @note 1459 * 1460 */ 1461 void 1462 bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 1463 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 1464 { 1465 int i; 1466 struct bfa_mem_dma_s *dma_info, *dma_elem; 1467 struct bfa_mem_kva_s *kva_info, *kva_elem; 1468 struct list_head *dm_qe, *km_qe; 1469 1470 bfa->fcs = BFA_FALSE; 1471 1472 WARN_ON((cfg == NULL) || (meminfo == NULL)); 1473 1474 /* Initialize memory pointers for iterative allocation */ 1475 dma_info = &meminfo->dma_info; 1476 dma_info->kva_curp = dma_info->kva; 1477 dma_info->dma_curp = dma_info->dma; 1478 1479 kva_info = &meminfo->kva_info; 1480 kva_info->kva_curp = kva_info->kva; 1481 1482 list_for_each(dm_qe, &dma_info->qe) { 1483 dma_elem = (struct bfa_mem_dma_s *) dm_qe; 1484 dma_elem->kva_curp = dma_elem->kva; 1485 dma_elem->dma_curp = dma_elem->dma; 1486 } 1487 1488 list_for_each(km_qe, &kva_info->qe) { 1489 kva_elem = (struct bfa_mem_kva_s *) km_qe; 1490 kva_elem->kva_curp = kva_elem->kva; 1491 } 1492 1493 bfa_iocfc_attach(bfa, bfad, cfg, pcidev); 1494 1495 for (i = 0; hal_mods[i]; i++) 1496 hal_mods[i]->attach(bfa, bfad, cfg, pcidev); 1497 1498 bfa_com_port_attach(bfa); 1499 bfa_com_ablk_attach(bfa); 1500 bfa_com_cee_attach(bfa); 1501 bfa_com_sfp_attach(bfa); 1502 bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg); 1503 bfa_com_diag_attach(bfa); 1504 bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg); 1505 } 1506 1507 /* 1508 * Use this function to delete a BFA IOC. IOC should be stopped (by 1509 * calling bfa_stop()) before this function call. 1510 * 1511 * @param[in] bfa - pointer to bfa_t. 1512 * 1513 * @return 1514 * void 1515 * 1516 * Special Considerations: 1517 * 1518 * @note 1519 */ 1520 void 1521 bfa_detach(struct bfa_s *bfa) 1522 { 1523 int i; 1524 1525 for (i = 0; hal_mods[i]; i++) 1526 hal_mods[i]->detach(bfa); 1527 bfa_ioc_detach(&bfa->ioc); 1528 } 1529 1530 void 1531 bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q) 1532 { 1533 INIT_LIST_HEAD(comp_q); 1534 list_splice_tail_init(&bfa->comp_q, comp_q); 1535 } 1536 1537 void 1538 bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) 1539 { 1540 struct list_head *qe; 1541 struct list_head *qen; 1542 struct bfa_cb_qe_s *hcb_qe; 1543 1544 list_for_each_safe(qe, qen, comp_q) { 1545 hcb_qe = (struct bfa_cb_qe_s *) qe; 1546 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE); 1547 } 1548 } 1549 1550 void 1551 bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) 1552 { 1553 struct list_head *qe; 1554 struct bfa_cb_qe_s *hcb_qe; 1555 1556 while (!list_empty(comp_q)) { 1557 bfa_q_deq(comp_q, &qe); 1558 hcb_qe = (struct bfa_cb_qe_s *) qe; 1559 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE); 1560 } 1561 } 1562 1563 1564 /* 1565 * Return the list of PCI vendor/device id lists supported by this 1566 * BFA instance. 1567 */ 1568 void 1569 bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids) 1570 { 1571 static struct bfa_pciid_s __pciids[] = { 1572 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P}, 1573 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P}, 1574 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT}, 1575 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, 1576 }; 1577 1578 *npciids = sizeof(__pciids) / sizeof(__pciids[0]); 1579 *pciids = __pciids; 1580 } 1581 1582 /* 1583 * Use this function query the default struct bfa_iocfc_cfg_s value (compiled 1584 * into BFA layer). The OS driver can then turn back and overwrite entries that 1585 * have been configured by the user. 1586 * 1587 * @param[in] cfg - pointer to bfa_ioc_cfg_t 1588 * 1589 * @return 1590 * void 1591 * 1592 * Special Considerations: 1593 * note 1594 */ 1595 void 1596 bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) 1597 { 1598 cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS; 1599 cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS; 1600 cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS; 1601 cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS; 1602 cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS; 1603 cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS; 1604 cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS; 1605 cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS; 1606 cfg->fwcfg.num_fwtio_reqs = 0; 1607 1608 cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS; 1609 cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS; 1610 cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS; 1611 cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS; 1612 cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS; 1613 cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF; 1614 cfg->drvcfg.ioc_recover = BFA_FALSE; 1615 cfg->drvcfg.delay_comp = BFA_FALSE; 1616 1617 } 1618 1619 void 1620 bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg) 1621 { 1622 bfa_cfg_get_default(cfg); 1623 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN; 1624 cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN; 1625 cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN; 1626 cfg->fwcfg.num_uf_bufs = BFA_UF_MIN; 1627 cfg->fwcfg.num_rports = BFA_RPORT_MIN; 1628 cfg->fwcfg.num_fwtio_reqs = 0; 1629 1630 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; 1631 cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; 1632 cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; 1633 cfg->drvcfg.min_cfg = BFA_TRUE; 1634 } 1635