/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

/*
 * bfad.c Linux driver PCI interface module.
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <asm/uaccess.h>
#include <asm/fcntl.h>

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_fcs.h"
#include "bfa_os_inc.h"
#include "bfa_defs.h"
#include "bfa.h"

BFA_TRC_FILE(LDRV, BFAD);
DEFINE_MUTEX(bfad_mutex);
LIST_HEAD(bfad_list);

static int bfad_inst;
static int num_sgpgs_parm;
int supported_fc4s;
char *host_name, *os_name, *os_patch;
int num_rports, num_ios, num_tms;
int num_fcxps, num_ufbufs;
int reqq_size, rspq_size, num_sgpgs;
int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
int bfa_io_max_sge = BFAD_IO_MAX_SGE;
int bfa_log_level = 3; /* WARNING log level */
int ioc_auto_recover = BFA_TRUE;
int bfa_linkup_delay = -1;
int fdmi_enable = BFA_TRUE;
int pcie_max_read_reqsz;
int bfa_debugfs_enable = 1;
int msix_disable_cb = 0, msix_disable_ct = 0;

u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;

const char *msix_name_ct[] = {
        "cpe0", "cpe1", "cpe2", "cpe3",
        "rme0", "rme1", "rme2", "rme3",
        "ctrl" };

const char *msix_name_cb[] = {
        "cpe0", "cpe1", "cpe2", "cpe3",
        "rme0", "rme1", "rme2", "rme3",
        "eemc", "elpu0", "elpu1", "epss", "mlpu" };

MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);

module_param(os_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
module_param(os_patch, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
module_param(host_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
module_param(num_rports, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
                "(physical/logical), default=1024");
module_param(num_ios, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
module_param(num_tms, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
                "buffers, default=64");
module_param(reqq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, "
                "default=256");
module_param(rspq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, "
                "default=64");
module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
                "Range[>0]");
module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
module_param(bfa_log_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, "
                "Range[Critical:1|Error:2|Warning:3|Info:4]");
module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
                "Range[off:0|on:1]");
module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
                "boot port. Otherwise 10 secs in RHEL4 & 0 for "
                "[RHEL5, SLES10, ESX40] Range[>0]");
module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts "
                "for Brocade-415/425/815/825 cards, default=0, "
                "Range[false:0|true:1]");
module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts "
                "if possible for Brocade-1010/1020/804/1007/902/1741 "
                "cards, default=0, Range[false:0|true:1]");
module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
                "Range[false:0|true:1]");
module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
                "(use system setting), Range[128|256|512|1024|2048|4096]");
module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1, "
                "Range[false:0|true:1]");
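
/*
 * Forward declarations of the driver instance state machine handlers.
 * Each handler processes bfad_sm_event events for one bfad state.
 */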
static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);

/*
 * Beginning state for the driver instance, awaiting the pci_probe event
 */
static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
{
        bfa_trc(bfad, event);

        switch (event) {
        case BFAD_E_CREATE:
                bfa_sm_set_state(bfad, bfad_sm_created);
                bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
                                                "%s", "bfad_worker");
                if (IS_ERR(bfad->bfad_tsk)) {
                        printk(KERN_INFO "bfad[%d]: Kernel thread "
                                "creation failed!\n", bfad->inst_no);
                        bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
                }
                bfa_sm_send_event(bfad, BFAD_E_INIT);
                break;

        case BFAD_E_STOP:
                /* Ignore stop; already in uninit */
                break;

        default:
                bfa_sm_fault(bfad, event);
        }
}

/*
 * Driver Instance is created, awaiting event INIT to initialize the bfad
 */
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
{
        unsigned long flags;

        bfa_trc(bfad, event);

        switch (event) {
        case BFAD_E_INIT:
                bfa_sm_set_state(bfad, bfad_sm_initializing);

                init_completion(&bfad->comp);

                /* Enable interrupts and wait for bfa_init completion */
                if (bfad_setup_intr(bfad)) {
                        printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
                                        bfad->inst_no);
                        bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
                        break;
                }

                spin_lock_irqsave(&bfad->bfad_lock, flags);
                bfa_init(&bfad->bfa);
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);

                /* Set up interrupt handler for each vector */
                if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
                        bfad_install_msix_handler(bfad)) {
                        printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
                                __func__, bfad->inst_no);
                }

                bfad_init_timer(bfad);

                wait_for_completion(&bfad->comp);

                if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
                        bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
                } else {
                        bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
                        bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
                }

                break;

        case BFAD_E_KTHREAD_CREATE_FAILED:
                bfa_sm_set_state(bfad, bfad_sm_uninit);
                break;

        default:
                bfa_sm_fault(bfad, event);
        }
}
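
/*
 * Driver initialization is in progress; awaiting the outcome of
 * bfa_init and interrupt setup.
 */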
static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
{
        int retval;
        unsigned long flags;

        bfa_trc(bfad, event);

        switch (event) {
        case BFAD_E_INIT_SUCCESS:
                kthread_stop(bfad->bfad_tsk);
                spin_lock_irqsave(&bfad->bfad_lock, flags);
                bfad->bfad_tsk = NULL;
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);

                retval = bfad_start_ops(bfad);
                if (retval != BFA_STATUS_OK)
                        break;
                bfa_sm_set_state(bfad, bfad_sm_operational);
                break;

        case BFAD_E_INTR_INIT_FAILED:
                bfa_sm_set_state(bfad, bfad_sm_uninit);
                kthread_stop(bfad->bfad_tsk);
                spin_lock_irqsave(&bfad->bfad_lock, flags);
                bfad->bfad_tsk = NULL;
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                break;

        case BFAD_E_INIT_FAILED:
                bfa_sm_set_state(bfad, bfad_sm_failed);
                break;
        default:
                bfa_sm_fault(bfad, event);
        }
}
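
/*
 * Initialization failed; awaiting either a successful
 * (re)initialization (BFAD_E_INIT_SUCCESS) or a stop request
 * (BFAD_E_STOP).
 */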
static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
{
        int retval;

        bfa_trc(bfad, event);

        switch (event) {
        case BFAD_E_INIT_SUCCESS:
                retval = bfad_start_ops(bfad);
                if (retval != BFA_STATUS_OK)
                        break;
                bfa_sm_set_state(bfad, bfad_sm_operational);
                break;

        case BFAD_E_STOP:
                if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
                        bfad_uncfg_pport(bfad);
                if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) {
                        bfad_im_probe_undo(bfad);
                        bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
                }
                bfad_stop(bfad);
                break;

        case BFAD_E_EXIT_COMP:
                bfa_sm_set_state(bfad, bfad_sm_uninit);
                bfad_remove_intr(bfad);
                del_timer_sync(&bfad->hal_tmo);
                break;

        default:
                bfa_sm_fault(bfad, event);
        }
}

static void
bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
{
        bfa_trc(bfad, event);

        switch (event) {
        case BFAD_E_STOP:
                bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
                bfad_fcs_stop(bfad);
                break;

        default:
                bfa_sm_fault(bfad, event);
        }
}

static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
{
        bfa_trc(bfad, event);

        switch (event) {
        case BFAD_E_FCS_EXIT_COMP:
                bfa_sm_set_state(bfad, bfad_sm_stopping);
                bfad_stop(bfad);
                break;

        default:
                bfa_sm_fault(bfad, event);
        }
}

static void
bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
{
        bfa_trc(bfad, event);

        switch (event) {
        case BFAD_E_EXIT_COMP:
                bfa_sm_set_state(bfad, bfad_sm_uninit);
                bfad_remove_intr(bfad);
                del_timer_sync(&bfad->hal_tmo);
                bfad_im_probe_undo(bfad);
                bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
                bfad_uncfg_pport(bfad);
                break;

        default:
                bfa_sm_fault(bfad, event);
                break;
        }
}

/*
 * BFA callbacks
 */
void
bfad_hcb_comp(void *arg, bfa_status_t status)
{
        struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;

        fcomp->status = status;
        complete(&fcomp->comp);
}

/*
 * bfa_init callback
 */
void
bfa_cb_init(void *drv, bfa_status_t init_status)
{
        struct bfad_s *bfad = drv;

        if (init_status == BFA_STATUS_OK) {
                bfad->bfad_flags |= BFAD_HAL_INIT_DONE;

                /*
                 * If BFAD_HAL_INIT_FAIL flag is set:
                 * Wake up the kernel thread to start
                 * the bfad operations after HAL init done
                 */
                if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
                        bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
                        wake_up_process(bfad->bfad_tsk);
                }
        }

        complete(&bfad->comp);
}

/*
 * BFA_FCS callbacks
 */
struct bfad_port_s *
bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
                 enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
                 struct bfad_vport_s *vp_drv)
{
        bfa_status_t rc;
        struct bfad_port_s *port_drv;

        if (!vp_drv && !vf_drv) {
                port_drv = &bfad->pport;
                port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
        } else if (!vp_drv && vf_drv) {
                port_drv = &vf_drv->base_port;
                port_drv->pvb_type = BFAD_PORT_VF_BASE;
        } else if (vp_drv && !vf_drv) {
                port_drv = &vp_drv->drv_port;
                port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
        } else {
                port_drv = &vp_drv->drv_port;
                port_drv->pvb_type = BFAD_PORT_VF_VPORT;
        }

        port_drv->fcs_port = port;
        port_drv->roles = roles;

        if (roles & BFA_LPORT_ROLE_FCP_IM) {
                rc = bfad_im_port_new(bfad, port_drv);
                if (rc != BFA_STATUS_OK) {
                        bfad_im_port_delete(bfad, port_drv);
                        port_drv = NULL;
                }
        }

        return port_drv;
}
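
/*
 * FCS lport delete callback; undoes the IM port setup done in
 * bfa_fcb_lport_new().
 */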
void
bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
                    struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
{
        struct bfad_port_s *port_drv;

        /* this will only be called from rmmod context */
        if (vp_drv && !vp_drv->comp_del) {
                port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
                                ((vf_drv) ? (&(vf_drv)->base_port) :
                                (&(bfad)->pport));
                bfa_trc(bfad, roles);
                if (roles & BFA_LPORT_ROLE_FCP_IM)
                        bfad_im_port_delete(bfad, port_drv);
        }
}

/*
 * FCS RPORT alloc callback, after successful PLOGI by FCS
 */
bfa_status_t
bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
                    struct bfad_rport_s **rport_drv)
{
        bfa_status_t rc = BFA_STATUS_OK;

        *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
        if (*rport_drv == NULL) {
                rc = BFA_STATUS_ENOMEM;
                goto ext;
        }

        *rport = &(*rport_drv)->fcs_rport;

ext:
        return rc;
}

/*
 * FCS PBC VPORT Create
 */
void
bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
{

        struct bfa_lport_cfg_s port_cfg = {0};
        struct bfad_vport_s *vport;
        int rc;

        vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
        if (!vport) {
                bfa_trc(bfad, 0);
                return;
        }

        vport->drv_port.bfad = bfad;
        port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
        port_cfg.pwwn = pbc_vport.vp_pwwn;
        port_cfg.nwwn = pbc_vport.vp_nwwn;
        port_cfg.preboot_vp = BFA_TRUE;

        rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
                                  &port_cfg, vport);

        if (rc != BFA_STATUS_OK) {
                bfa_trc(bfad, 0);
                /* free the vport; it was never added to the pbc list */
                kfree(vport);
                return;
        }

        list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
}

void
bfad_hal_mem_release(struct bfad_s *bfad)
{
        int i;
        struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
        struct bfa_mem_elem_s *meminfo_elem;

        for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
                meminfo_elem = &hal_meminfo->meminfo[i];
                if (meminfo_elem->kva != NULL) {
                        switch (meminfo_elem->mem_type) {
                        case BFA_MEM_TYPE_KVA:
                                vfree(meminfo_elem->kva);
                                break;
                        case BFA_MEM_TYPE_DMA:
                                dma_free_coherent(&bfad->pcidev->dev,
                                        meminfo_elem->mem_len,
                                        meminfo_elem->kva,
                                        (dma_addr_t) meminfo_elem->dma);
                                break;
                        default:
                                bfa_assert(0);
                                break;
                        }
                }
        }

        memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
}
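
/*
 * Apply any module-parameter overrides to the default HAL configuration.
 */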
void
bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
{
        if (num_rports > 0)
                bfa_cfg->fwcfg.num_rports = num_rports;
        if (num_ios > 0)
                bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
        if (num_tms > 0)
                bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
        if (num_fcxps > 0)
                bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
        if (num_ufbufs > 0)
                bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
        if (reqq_size > 0)
                bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
        if (rspq_size > 0)
                bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
        if (num_sgpgs > 0)
                bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;

        /*
         * populate the hal values back to the driver for sysfs use.
         * otherwise, the default values will be shown as 0 in sysfs
         */
        num_rports = bfa_cfg->fwcfg.num_rports;
        num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
        num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
        num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
        num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
        reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
        rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
        num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
}
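
/*
 * Allocate the KVA and DMA memory requested by the HAL. If a DMA
 * allocation fails, retry with a smaller num_sgpgs until
 * min_num_sgpgs is reached.
 */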
bfa_status_t
bfad_hal_mem_alloc(struct bfad_s *bfad)
{
        int i;
        struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
        struct bfa_mem_elem_s *meminfo_elem;
        dma_addr_t phys_addr;
        void *kva;
        bfa_status_t rc = BFA_STATUS_OK;
        int retry_count = 0;
        int reset_value = 1;
        int min_num_sgpgs = 512;

        bfa_cfg_get_default(&bfad->ioc_cfg);

retry:
        bfad_update_hal_cfg(&bfad->ioc_cfg);
        bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
        bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo);

        for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
                meminfo_elem = &hal_meminfo->meminfo[i];
                switch (meminfo_elem->mem_type) {
                case BFA_MEM_TYPE_KVA:
                        kva = vmalloc(meminfo_elem->mem_len);
                        if (kva == NULL) {
                                bfad_hal_mem_release(bfad);
                                rc = BFA_STATUS_ENOMEM;
                                goto ext;
                        }
                        memset(kva, 0, meminfo_elem->mem_len);
                        meminfo_elem->kva = kva;
                        break;
                case BFA_MEM_TYPE_DMA:
                        kva = dma_alloc_coherent(&bfad->pcidev->dev,
                                meminfo_elem->mem_len, &phys_addr, GFP_KERNEL);
                        if (kva == NULL) {
                                bfad_hal_mem_release(bfad);
                                /*
                                 * If we cannot allocate with default
                                 * num_sgpgs try with half the value.
                                 */
                                if (num_sgpgs > min_num_sgpgs) {
                                        printk(KERN_INFO
                                        "bfad[%d]: memory allocation failed"
                                        " with num_sgpgs: %d\n",
                                                bfad->inst_no, num_sgpgs);
                                        nextLowerInt(&num_sgpgs);
                                        printk(KERN_INFO
                                        "bfad[%d]: trying to allocate memory"
                                        " with num_sgpgs: %d\n",
                                                bfad->inst_no, num_sgpgs);
                                        retry_count++;
                                        goto retry;
                                } else {
                                        if (num_sgpgs_parm > 0)
                                                num_sgpgs = num_sgpgs_parm;
                                        else {
                                                reset_value =
                                                        (1 << retry_count);
                                                num_sgpgs *= reset_value;
                                        }
                                        rc = BFA_STATUS_ENOMEM;
                                        goto ext;
                                }
                        }

                        if (num_sgpgs_parm > 0)
                                num_sgpgs = num_sgpgs_parm;
                        else {
                                reset_value = (1 << retry_count);
                                num_sgpgs *= reset_value;
                        }

                        memset(kva, 0, meminfo_elem->mem_len);
                        meminfo_elem->kva = kva;
                        meminfo_elem->dma = phys_addr;
                        break;
                default:
                        break;

                }
        }
ext:
        return rc;
}

/*
 * Create a vport under a vf.
 */
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
                  struct bfa_lport_cfg_s *port_cfg, struct device *dev)
{
        struct bfad_vport_s *vport;
        int rc = BFA_STATUS_OK;
        unsigned long flags;
        struct completion fcomp;

        vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
        if (!vport) {
                rc = BFA_STATUS_ENOMEM;
                goto ext;
        }

        vport->drv_port.bfad = bfad;
        spin_lock_irqsave(&bfad->bfad_lock, flags);
        rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
                                  port_cfg, vport);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);

        if (rc != BFA_STATUS_OK)
                goto ext_free_vport;

        if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
                rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
                                                        dev);
                if (rc != BFA_STATUS_OK)
                        goto ext_free_fcs_vport;
        }

        spin_lock_irqsave(&bfad->bfad_lock, flags);
        bfa_fcs_vport_start(&vport->fcs_vport);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);

        return BFA_STATUS_OK;

ext_free_fcs_vport:
        spin_lock_irqsave(&bfad->bfad_lock, flags);
        vport->comp_del = &fcomp;
        init_completion(vport->comp_del);
        bfa_fcs_vport_delete(&vport->fcs_vport);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        wait_for_completion(vport->comp_del);
ext_free_vport:
        kfree(vport);
ext:
        return rc;
}

void
bfad_bfa_tmo(unsigned long data)
{
        struct bfad_s *bfad = (struct bfad_s *) data;
        unsigned long flags;
        struct list_head doneq;

        spin_lock_irqsave(&bfad->bfad_lock, flags);

        bfa_timer_tick(&bfad->bfa);

        bfa_comp_deq(&bfad->bfa, &doneq);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);

        if (!list_empty(&doneq)) {
                bfa_comp_process(&bfad->bfa, &doneq);
                spin_lock_irqsave(&bfad->bfad_lock, flags);
                bfa_comp_free(&bfad->bfa, &doneq);
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        }

        mod_timer(&bfad->hal_tmo,
                  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}

void
bfad_init_timer(struct bfad_s *bfad)
{
        init_timer(&bfad->hal_tmo);
        bfad->hal_tmo.function = bfad_bfa_tmo;
        bfad->hal_tmo.data = (unsigned long)bfad;

        mod_timer(&bfad->hal_tmo,
                  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}
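
/*
 * Enable the PCI device, map BAR0 and record the PCI attributes needed
 * by the HAL; optionally adjusts the PCIe maximum read request size.
 */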
int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
        int rc = -ENODEV;

        if (pci_enable_device(pdev)) {
                printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
                goto out;
        }

        if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
                goto out_disable_device;

        pci_set_master(pdev);

        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
                if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
                        printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
                        goto out_release_region;
                }

        bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));

        if (bfad->pci_bar0_kva == NULL) {
                printk(KERN_ERR "Fail to map bar0\n");
                goto out_release_region;
        }

        bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
        bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
        bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
        bfad->hal_pcidev.device_id = pdev->device;
        bfad->pci_name = pci_name(pdev);

        bfad->pci_attr.vendor_id = pdev->vendor;
        bfad->pci_attr.device_id = pdev->device;
        bfad->pci_attr.ssid = pdev->subsystem_device;
        bfad->pci_attr.ssvid = pdev->subsystem_vendor;
        bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);

        bfad->pcidev = pdev;

        /* Adjust PCIe Maximum Read Request Size */
        if (pcie_max_read_reqsz > 0) {
                int pcie_cap_reg;
                u16 pcie_dev_ctl;
                u16 mask = 0xffff;

                switch (pcie_max_read_reqsz) {
                case 128:
                        mask = 0x0;
                        break;
                case 256:
                        mask = 0x1000;
                        break;
                case 512:
                        mask = 0x2000;
                        break;
                case 1024:
                        mask = 0x3000;
                        break;
                case 2048:
                        mask = 0x4000;
                        break;
                case 4096:
                        mask = 0x5000;
                        break;
                default:
                        break;
                }

                pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
                if (mask != 0xffff && pcie_cap_reg) {
                        pcie_cap_reg += 0x08;
                        pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
                        if ((pcie_dev_ctl & 0x7000) != mask) {
                                printk(KERN_WARNING "BFA[%s]: "
                                "pcie_max_read_request_size is %d, "
                                "reset to %d\n", bfad->pci_name,
                                (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
                                pcie_max_read_reqsz);

                                pcie_dev_ctl &= ~0x7000;
                                pci_write_config_word(pdev, pcie_cap_reg,
                                                pcie_dev_ctl | mask);
                        }
                }
        }

        return 0;

out_release_region:
        pci_release_regions(pdev);
out_disable_device:
        pci_disable_device(pdev);
out:
        return rc;
}

void
bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
{
        pci_iounmap(pdev, bfad->pci_bar0_kva);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}
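
/*
 * One-time driver/HAL attach: allocate HAL memory, attach the BFA and
 * FCS instances and set up tracing. Interrupts are not yet requested.
 */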
bfa_status_t
bfad_drv_init(struct bfad_s *bfad)
{
        bfa_status_t rc;
        unsigned long flags;

        bfad->cfg_data.rport_del_timeout = rport_del_timeout;
        bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
        bfad->cfg_data.io_max_sge = bfa_io_max_sge;
        bfad->cfg_data.binding_method = FCP_PWWN_BINDING;

        rc = bfad_hal_mem_alloc(bfad);
        if (rc != BFA_STATUS_OK) {
                printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
                       bfad->inst_no);
                printk(KERN_WARNING
                        "Not enough memory to attach all Brocade HBA ports, %s",
                        "System may need more memory.\n");
                goto out_hal_mem_alloc_failure;
        }

        bfa_init_trc(&bfad->bfa, bfad->trcmod);
        bfa_init_plog(&bfad->bfa, &bfad->plog_buf);
        bfa_plog_init(&bfad->plog_buf);
        bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
                     0, "Driver Attach");

        bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
                   &bfad->hal_pcidev);

        /* FCS INIT */
        spin_lock_irqsave(&bfad->bfad_lock, flags);
        bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod);
        bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
        bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);

        bfad->bfad_flags |= BFAD_DRV_INIT_DONE;

        return BFA_STATUS_OK;

out_hal_mem_alloc_failure:
        return BFA_STATUS_FAILED;
}

void
bfad_drv_uninit(struct bfad_s *bfad)
{
        unsigned long flags;

        spin_lock_irqsave(&bfad->bfad_lock, flags);
        init_completion(&bfad->comp);
        bfa_stop(&bfad->bfa);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        wait_for_completion(&bfad->comp);

        del_timer_sync(&bfad->hal_tmo);
        bfa_isr_disable(&bfad->bfa);
        bfa_detach(&bfad->bfa);
        bfad_remove_intr(bfad);
        bfad_hal_mem_release(bfad);

        bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
}

void
bfad_drv_start(struct bfad_s *bfad)
{
        unsigned long flags;

        spin_lock_irqsave(&bfad->bfad_lock, flags);
        bfa_start(&bfad->bfa);
        bfa_fcs_start(&bfad->bfa_fcs);
        bfad->bfad_flags |= BFAD_HAL_START_DONE;
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);

        if (bfad->im)
                flush_workqueue(bfad->im->drv_workq);
}

void
bfad_fcs_stop(struct bfad_s *bfad)
{
        unsigned long flags;

        spin_lock_irqsave(&bfad->bfad_lock, flags);
        init_completion(&bfad->comp);
        bfad->pport.flags |= BFAD_PORT_DELETE;
        bfa_fcs_exit(&bfad->bfa_fcs);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        wait_for_completion(&bfad->comp);

        bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
}

void
bfad_stop(struct bfad_s *bfad)
{
        unsigned long flags;

        spin_lock_irqsave(&bfad->bfad_lock, flags);
        init_completion(&bfad->comp);
        bfa_stop(&bfad->bfa);
        bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        wait_for_completion(&bfad->comp);

        bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP);
}

bfa_status_t
bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
{
        int rc = BFA_STATUS_OK;

        /* Allocate scsi_host for the physical port */
        if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
            (role & BFA_LPORT_ROLE_FCP_IM)) {
                if (bfad->pport.im_port == NULL) {
                        rc = BFA_STATUS_FAILED;
                        goto out;
                }

                rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
                                                &bfad->pcidev->dev);
                if (rc != BFA_STATUS_OK)
                        goto out;

                bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
        }

        /* Setup the debugfs node for this scsi_host */
        if (bfa_debugfs_enable)
                bfad_debugfs_init(&bfad->pport);

        bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;

out:
        return rc;
}

void
bfad_uncfg_pport(struct bfad_s *bfad)
{
        /* Remove the debugfs node for this scsi_host */
        kfree(bfad->regdata);
        bfad_debugfs_exit(&bfad->pport);

        if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
            (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
                bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
                bfad_im_port_clean(bfad->pport.im_port);
                kfree(bfad->pport.im_port);
                bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM;
        }

        bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
}
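
/*
 * Start normal operation: hand the driver info to FCS, configure the
 * physical port, probe the IM layer and complete any boot-time (PBC)
 * vport creates.
 */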
bfa_status_t
bfad_start_ops(struct bfad_s *bfad)
{
        int retval;
        unsigned long flags;
        struct bfad_vport_s *vport, *vport_new;
        struct bfa_fcs_driver_info_s driver_info;

        /* Fill the driver_info info to fcs */
        memset(&driver_info, 0, sizeof(driver_info));
        strncpy(driver_info.version, BFAD_DRIVER_VERSION,
                sizeof(driver_info.version) - 1);
        if (host_name)
                strncpy(driver_info.host_machine_name, host_name,
                        sizeof(driver_info.host_machine_name) - 1);
        if (os_name)
                strncpy(driver_info.host_os_name, os_name,
                        sizeof(driver_info.host_os_name) - 1);
        if (os_patch)
                strncpy(driver_info.host_os_patch, os_patch,
                        sizeof(driver_info.host_os_patch) - 1);

        strncpy(driver_info.os_device_name, bfad->pci_name,
                sizeof(driver_info.os_device_name) - 1);

        /* FCS INIT */
        spin_lock_irqsave(&bfad->bfad_lock, flags);
        bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
        bfa_fcs_init(&bfad->bfa_fcs);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);

        retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
        if (retval != BFA_STATUS_OK) {
                if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
                        bfa_sm_set_state(bfad, bfad_sm_failed);
                bfad_stop(bfad);
                return BFA_STATUS_FAILED;
        }

        /* BFAD level FC4 IM specific resource allocation */
        retval = bfad_im_probe(bfad);
        if (retval != BFA_STATUS_OK) {
                printk(KERN_WARNING "bfad_im_probe failed\n");
                if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
                        bfa_sm_set_state(bfad, bfad_sm_failed);
                bfad_im_probe_undo(bfad);
                bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
                bfad_uncfg_pport(bfad);
                bfad_stop(bfad);
                return BFA_STATUS_FAILED;
        } else
                bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;

        bfad_drv_start(bfad);

        /* Complete pbc vport create */
        list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
                                 list_entry) {
                struct fc_vport_identifiers vid;
                struct fc_vport *fc_vport;
                char pwwn_buf[BFA_STRING_32];

                memset(&vid, 0, sizeof(vid));
                vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
                vid.vport_type = FC_PORTTYPE_NPIV;
                vid.disable = false;
                vid.node_name = wwn_to_u64((u8 *)
                                (&((vport->fcs_vport).lport.port_cfg.nwwn)));
                vid.port_name = wwn_to_u64((u8 *)
                                (&((vport->fcs_vport).lport.port_cfg.pwwn)));
                fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
                if (!fc_vport) {
                        wwn2str(pwwn_buf, vid.port_name);
                        printk(KERN_WARNING "bfad%d: failed to create pbc vport"
                                " %s\n", bfad->inst_no, pwwn_buf);
                }
                list_del(&vport->list_entry);
                kfree(vport);
        }

        /*
         * If bfa_linkup_delay is left at the -1 default, retrieve the
         * value using bfad_os_get_linkup_delay(); else use the passed-in
         * module parameter value as the link-up delay.
         */
        if (bfa_linkup_delay < 0) {
                bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
                bfad_os_rport_online_wait(bfad);
                bfa_linkup_delay = -1;
        } else
                bfad_os_rport_online_wait(bfad);

        BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");

        return BFA_STATUS_OK;
}

int
bfad_worker(void *ptr)
{
        struct bfad_s *bfad;
        unsigned long flags;

        bfad = (struct bfad_s *)ptr;

        while (!kthread_should_stop()) {

                /* Send event BFAD_E_INIT_SUCCESS */
                bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);

                spin_lock_irqsave(&bfad->bfad_lock, flags);
                bfad->bfad_tsk = NULL;
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);

                break;
        }

        return 0;
}

/*
 * BFA driver interrupt functions
 */
irqreturn_t
bfad_intx(int irq, void *dev_id)
{
        struct bfad_s *bfad = dev_id;
        struct list_head doneq;
        unsigned long flags;
        bfa_boolean_t rc;

        spin_lock_irqsave(&bfad->bfad_lock, flags);
        rc = bfa_intx(&bfad->bfa);
        if (!rc) {
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                return IRQ_NONE;
        }

        bfa_comp_deq(&bfad->bfa, &doneq);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);

        if (!list_empty(&doneq)) {
                bfa_comp_process(&bfad->bfa, &doneq);

                spin_lock_irqsave(&bfad->bfad_lock, flags);
                bfa_comp_free(&bfad->bfa, &doneq);
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                bfa_trc_fp(bfad, irq);
        }

        return IRQ_HANDLED;
}
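
/*
 * MSI-X vector handler; runs the BFA handler for this vector and
 * processes any completions it queued.
 */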
static irqreturn_t
bfad_msix(int irq, void *dev_id)
{
        struct bfad_msix_s *vec = dev_id;
        struct bfad_s *bfad = vec->bfad;
        struct list_head doneq;
        unsigned long flags;

        spin_lock_irqsave(&bfad->bfad_lock, flags);

        bfa_msix(&bfad->bfa, vec->msix.entry);
        bfa_comp_deq(&bfad->bfa, &doneq);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);

        if (!list_empty(&doneq)) {
                bfa_comp_process(&bfad->bfa, &doneq);

                spin_lock_irqsave(&bfad->bfad_lock, flags);
                bfa_comp_free(&bfad->bfa, &doneq);
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        }

        return IRQ_HANDLED;
}

/*
 * Initialize the MSIX entry table.
 */
static void
bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
                         int mask, int max_bit)
{
        int i;
        int match = 0x00000001;

        for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
                if (mask & match) {
                        bfad->msix_tab[bfad->nvec].msix.entry = i;
                        bfad->msix_tab[bfad->nvec].bfad = bfad;
                        msix_entries[bfad->nvec].entry = i;
                        bfad->nvec++;
                }

                match <<= 1;
        }

}

int
bfad_install_msix_handler(struct bfad_s *bfad)
{
        int i, error = 0;

        for (i = 0; i < bfad->nvec; i++) {
                sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
                                bfad->pci_name,
                                ((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ?
                                msix_name_ct[i] : msix_name_cb[i]));

                error = request_irq(bfad->msix_tab[i].msix.vector,
                                    (irq_handler_t) bfad_msix, 0,
                                    bfad->msix_tab[i].name, &bfad->msix_tab[i]);
                bfa_trc(bfad, i);
                bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
                if (error) {
                        int j;

                        for (j = 0; j < i; j++)
                                free_irq(bfad->msix_tab[j].msix.vector,
                                                &bfad->msix_tab[j]);

                        return 1;
                }
        }

        return 0;
}

/*
 * Setup MSIX based interrupt.
 */
int
bfad_setup_intr(struct bfad_s *bfad)
{
        int error = 0;
        u32 mask = 0, i, num_bit = 0, max_bit = 0;
        struct msix_entry msix_entries[MAX_MSIX_ENTRY];
        struct pci_dev *pdev = bfad->pcidev;

        /* Call BFA to get the msix map for this PCI function. */
        bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);

        /* Set up the msix entry table */
        bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);

        if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
            (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {

                error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
                if (error) {
                        /*
                         * On failure, pci_enable_msix() returns only the
                         * number of vectors available. We have no mechanism
                         * to map multiple interrupts into one vector, so
                         * even if we could request fewer vectors, we would
                         * not know how to associate interrupt events with
                         * them. Linux doesn't duplicate vectors in the MSIX
                         * table for this case.
                         */

                        printk(KERN_WARNING "bfad%d: "
                                "pci_enable_msix failed (%d), "
                                "use line based.\n", bfad->inst_no, error);

                        goto line_based;
                }

                /* Save the vectors */
                for (i = 0; i < bfad->nvec; i++) {
                        bfa_trc(bfad, msix_entries[i].vector);
                        bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
                }

                bfa_msix_init(&bfad->bfa, bfad->nvec);

                bfad->bfad_flags |= BFAD_MSIX_ON;

                return error;
        }

line_based:
        error = 0;
        if (request_irq
            (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
             BFAD_DRIVER_NAME, bfad) != 0) {
                /* Enable interrupt handler failed */
                return 1;
        }

        return error;
}
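
/*
 * Free the MSI-X vectors or the INTx line acquired in bfad_setup_intr().
 */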
void
bfad_remove_intr(struct bfad_s *bfad)
{
        int i;

        if (bfad->bfad_flags & BFAD_MSIX_ON) {
                for (i = 0; i < bfad->nvec; i++)
                        free_irq(bfad->msix_tab[i].msix.vector,
                                        &bfad->msix_tab[i]);

                pci_disable_msix(bfad->pcidev);
                bfad->bfad_flags &= ~BFAD_MSIX_ON;
        } else {
                free_irq(bfad->pcidev->irq, bfad);
        }
}

/*
 * PCI probe entry.
 */
int
bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
        struct bfad_s *bfad;
        int error = -ENODEV, retval;

        /* For single port cards - only claim function 0 */
        if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
                (PCI_FUNC(pdev->devfn) != 0))
                return -ENODEV;

        bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
        if (!bfad) {
                error = -ENOMEM;
                goto out;
        }

        bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
        if (!bfad->trcmod) {
                printk(KERN_WARNING "Error alloc trace buffer!\n");
                error = -ENOMEM;
                goto out_alloc_trace_failure;
        }

        /* TRACE INIT */
        bfa_trc_init(bfad->trcmod);
        bfa_trc(bfad, bfad_inst);

        if (!(bfad_load_fwimg(pdev))) {
                kfree(bfad->trcmod);
                goto out_alloc_trace_failure;
        }

        retval = bfad_pci_init(pdev, bfad);
        if (retval) {
                printk(KERN_WARNING "bfad_pci_init failure!\n");
                error = retval;
                goto out_pci_init_failure;
        }

        mutex_lock(&bfad_mutex);
        bfad->inst_no = bfad_inst++;
        list_add_tail(&bfad->list_entry, &bfad_list);
        mutex_unlock(&bfad_mutex);

        /* Initializing the state machine: State set to uninit */
        bfa_sm_set_state(bfad, bfad_sm_uninit);

        spin_lock_init(&bfad->bfad_lock);
        pci_set_drvdata(pdev, bfad);

        bfad->ref_count = 0;
        bfad->pport.bfad = bfad;
        INIT_LIST_HEAD(&bfad->pbc_vport_list);

        retval = bfad_drv_init(bfad);
        if (retval != BFA_STATUS_OK)
                goto out_drv_init_failure;

        bfa_sm_send_event(bfad, BFAD_E_CREATE);

        if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
                goto out_bfad_sm_failure;

        return 0;

out_bfad_sm_failure:
        bfa_detach(&bfad->bfa);
        bfad_hal_mem_release(bfad);
out_drv_init_failure:
        mutex_lock(&bfad_mutex);
        bfad_inst--;
        list_del(&bfad->list_entry);
        mutex_unlock(&bfad_mutex);
        bfad_pci_uninit(pdev, bfad);
out_pci_init_failure:
        kfree(bfad->trcmod);
out_alloc_trace_failure:
        kfree(bfad);
out:
        return error;
}

/*
 * PCI remove entry.
 */
void
bfad_pci_remove(struct pci_dev *pdev)
{
        struct bfad_s *bfad = pci_get_drvdata(pdev);
        unsigned long flags;

        bfa_trc(bfad, bfad->inst_no);

        spin_lock_irqsave(&bfad->bfad_lock, flags);
        if (bfad->bfad_tsk != NULL) {
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                kthread_stop(bfad->bfad_tsk);
        } else {
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        }

        /* Send Event BFAD_E_STOP */
        bfa_sm_send_event(bfad, BFAD_E_STOP);

        /* Driver detach and dealloc mem */
        spin_lock_irqsave(&bfad->bfad_lock, flags);
        bfa_detach(&bfad->bfa);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        bfad_hal_mem_release(bfad);

        /* Cleaning the BFAD instance */
        mutex_lock(&bfad_mutex);
        bfad_inst--;
        list_del(&bfad->list_entry);
        mutex_unlock(&bfad_mutex);
        bfad_pci_uninit(pdev, bfad);

        kfree(bfad->trcmod);
        kfree(bfad);
}

struct pci_device_id bfad_id_table[] = {
        {
                .vendor = BFA_PCI_VENDOR_ID_BROCADE,
                .device = BFA_PCI_DEVICE_ID_FC_8G2P,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
        },
        {
                .vendor = BFA_PCI_VENDOR_ID_BROCADE,
                .device = BFA_PCI_DEVICE_ID_FC_8G1P,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
        },
        {
                .vendor = BFA_PCI_VENDOR_ID_BROCADE,
                .device = BFA_PCI_DEVICE_ID_CT,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
                .class = (PCI_CLASS_SERIAL_FIBER << 8),
                .class_mask = ~0,
        },
        {
                .vendor = BFA_PCI_VENDOR_ID_BROCADE,
                .device = BFA_PCI_DEVICE_ID_CT_FC,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
                .class = (PCI_CLASS_SERIAL_FIBER << 8),
                .class_mask = ~0,
        },

        {0, 0},
};

MODULE_DEVICE_TABLE(pci, bfad_id_table);

static struct pci_driver bfad_pci_driver = {
        .name = BFAD_DRIVER_NAME,
        .id_table = bfad_id_table,
        .probe = bfad_pci_probe,
        .remove = __devexit_p(bfad_pci_remove),
};

/*
 * Driver module init.
 */
static int __init
bfad_init(void)
{
        int error = 0;

        printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
                        BFAD_DRIVER_VERSION);

        if (num_sgpgs > 0)
                num_sgpgs_parm = num_sgpgs;

        error = bfad_im_module_init();
        if (error) {
                error = -ENOMEM;
                printk(KERN_WARNING "bfad_im_module_init failure\n");
                goto ext;
        }

        if (strcmp(FCPI_NAME, " fcpim") == 0)
                supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;

        bfa_ioc_auto_recover(ioc_auto_recover);
        bfa_fcs_rport_set_del_timeout(rport_del_timeout);

        error = pci_register_driver(&bfad_pci_driver);
        if (error) {
                printk(KERN_WARNING "pci_register_driver failure\n");
                goto ext;
        }

        return 0;

ext:
        bfad_im_module_exit();
        return error;
}

/*
 * Driver module exit.
 */
static void __exit
bfad_exit(void)
{
        pci_unregister_driver(&bfad_pci_driver);
        bfad_im_module_exit();
        bfad_free_fwimg();
}

/* Firmware handling */
u32 *
bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
                        u32 *bfi_image_size, char *fw_name)
{
        const struct firmware *fw;

        if (request_firmware(&fw, fw_name, &pdev->dev)) {
                printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
                goto error;
        }

        *bfi_image = vmalloc(fw->size);
        if (NULL == *bfi_image) {
                printk(KERN_ALERT "Fail to allocate buffer for fw image "
                        "size=%x!\n", (u32) fw->size);
                release_firmware(fw);
                goto error;
        }

        memcpy(*bfi_image, fw->data, fw->size);
        *bfi_image_size = fw->size/sizeof(u32);
        /* image has been copied out; drop the firmware request */
        release_firmware(fw);

        return *bfi_image;

error:
        return NULL;
}
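
/*
 * Return the cached firmware image for this adapter type, reading it
 * from disk on first use.
 */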
u32 *
bfad_get_firmware_buf(struct pci_dev *pdev)
{
        if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
                if (bfi_image_ct_fc_size == 0)
                        bfad_read_firmware(pdev, &bfi_image_ct_fc,
                                &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
                return bfi_image_ct_fc;
        } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
                if (bfi_image_ct_cna_size == 0)
                        bfad_read_firmware(pdev, &bfi_image_ct_cna,
                                &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
                return bfi_image_ct_cna;
        } else {
                if (bfi_image_cb_fc_size == 0)
                        bfad_read_firmware(pdev, &bfi_image_cb_fc,
                                &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
                return bfi_image_cb_fc;
        }
}

module_init(bfad_init);
module_exit(bfad_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
MODULE_AUTHOR("Brocade Communications Systems, Inc.");
MODULE_VERSION(BFAD_DRIVER_VERSION);