/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

/**
 *  bfad.c Linux driver PCI interface module.
 */

#include <linux/module.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_tm.h"
#include "bfad_ipfc.h"
#include "bfad_trcmod.h"
#include <fcb/bfa_fcb_vf.h>
#include <fcb/bfa_fcb_rport.h>
#include <fcb/bfa_fcb_port.h>
#include <fcb/bfa_fcb.h>

BFA_TRC_FILE(LDRV, BFAD);
static DEFINE_MUTEX(bfad_mutex);
LIST_HEAD(bfad_list);
static int bfad_inst;
int bfad_supported_fc4s;

static char *host_name;
static char *os_name;
static char *os_patch;
static int num_rports;
static int num_ios;
static int num_tms;
static int num_fcxps;
static int num_ufbufs;
static int reqq_size;
static int rspq_size;
static int num_sgpgs;
static int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
static int bfa_io_max_sge = BFAD_IO_MAX_SGE;
static int log_level = BFA_LOG_WARNING;
static int ioc_auto_recover = BFA_TRUE;
static int ipfc_enable = BFA_FALSE;
static int ipfc_mtu = -1;
int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
int bfa_linkup_delay = -1;

module_param(os_name, charp, S_IRUGO | S_IWUSR);
module_param(os_patch, charp, S_IRUGO | S_IWUSR);
module_param(host_name, charp, S_IRUGO | S_IWUSR);
module_param(num_rports, int, S_IRUGO | S_IWUSR);
module_param(num_ios, int, S_IRUGO | S_IWUSR);
module_param(num_tms, int, S_IRUGO | S_IWUSR);
module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
module_param(reqq_size, int, S_IRUGO | S_IWUSR);
module_param(rspq_size, int, S_IRUGO | S_IWUSR);
module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
module_param(log_level, int, S_IRUGO | S_IWUSR);
module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
module_param(ipfc_enable, int, S_IRUGO | S_IWUSR);
module_param(ipfc_mtu, int, S_IRUGO | S_IWUSR);
module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);

/*
 * Stores the module parm num_sgpgs value;
 * used to reset num_sgpgs for the next bfad instance.
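 * (bfad_hal_mem_alloc() may halve num_sgpgs while retrying failed
 * DMA allocations, hence the need to restore it.)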
 */
static int num_sgpgs_parm;

static bfa_status_t
bfad_fc4_probe(struct bfad_s *bfad)
{
	int rc;

	rc = bfad_im_probe(bfad);
	if (rc != BFA_STATUS_OK)
		goto ext;

	bfad_tm_probe(bfad);

	if (ipfc_enable)
		bfad_ipfc_probe(bfad);
ext:
	return rc;
}

static void
bfad_fc4_probe_undo(struct bfad_s *bfad)
{
	bfad_im_probe_undo(bfad);
	bfad_tm_probe_undo(bfad);
	if (ipfc_enable)
		bfad_ipfc_probe_undo(bfad);
}

static void
bfad_fc4_probe_post(struct bfad_s *bfad)
{
	if (bfad->im)
		bfad_im_probe_post(bfad->im);

	bfad_tm_probe_post(bfad);
	if (ipfc_enable)
		bfad_ipfc_probe_post(bfad);
}

static bfa_status_t
bfad_fc4_port_new(struct bfad_s *bfad, struct bfad_port_s *port, int roles)
{
	int rc = BFA_STATUS_FAILED;

	if (roles & BFA_PORT_ROLE_FCP_IM)
		rc = bfad_im_port_new(bfad, port);
	if (rc != BFA_STATUS_OK)
		goto ext;

	if (roles & BFA_PORT_ROLE_FCP_TM)
		rc = bfad_tm_port_new(bfad, port);
	if (rc != BFA_STATUS_OK)
		goto ext;

	if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
		rc = bfad_ipfc_port_new(bfad, port, port->pvb_type);
ext:
	return rc;
}

static void
bfad_fc4_port_delete(struct bfad_s *bfad, struct bfad_port_s *port, int roles)
{
	if (roles & BFA_PORT_ROLE_FCP_IM)
		bfad_im_port_delete(bfad, port);

	if (roles & BFA_PORT_ROLE_FCP_TM)
		bfad_tm_port_delete(bfad, port);

	if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
		bfad_ipfc_port_delete(bfad, port);
}

/**
 *  BFA callbacks
 */
void
bfad_hcb_comp(void *arg, bfa_status_t status)
{
	struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;

	fcomp->status = status;
	complete(&fcomp->comp);
}

/**
 * bfa_init callback
 */
void
bfa_cb_init(void *drv, bfa_status_t init_status)
{
	struct bfad_s *bfad = drv;

	if (init_status == BFA_STATUS_OK)
		bfad->bfad_flags |= BFAD_HAL_INIT_DONE;

	complete(&bfad->comp);
}

/**
 *  BFA_FCS callbacks
 */
static struct bfad_port_s *
bfad_get_drv_port(struct bfad_s *bfad, struct bfad_vf_s *vf_drv,
		  struct bfad_vport_s *vp_drv)
{
	return (vp_drv) ? (&(vp_drv)->drv_port)
		: ((vf_drv) ?
		   (&(vf_drv)->base_port) : (&(bfad)->pport));
}

struct bfad_port_s *
bfa_fcb_port_new(struct bfad_s *bfad, struct bfa_fcs_port_s *port,
		 enum bfa_port_role roles, struct bfad_vf_s *vf_drv,
		 struct bfad_vport_s *vp_drv)
{
	bfa_status_t rc;
	struct bfad_port_s *port_drv;

	if (!vp_drv && !vf_drv) {
		port_drv = &bfad->pport;
		port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
	} else if (!vp_drv && vf_drv) {
		port_drv = &vf_drv->base_port;
		port_drv->pvb_type = BFAD_PORT_VF_BASE;
	} else if (vp_drv && !vf_drv) {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
	} else {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_VF_VPORT;
	}

	port_drv->fcs_port = port;
	port_drv->roles = roles;
	rc = bfad_fc4_port_new(bfad, port_drv, roles);
	if (rc != BFA_STATUS_OK) {
		bfad_fc4_port_delete(bfad, port_drv, roles);
		port_drv = NULL;
	}

	return port_drv;
}

void
bfa_fcb_port_delete(struct bfad_s *bfad, enum bfa_port_role roles,
		    struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
{
	struct bfad_port_s *port_drv;

	/*
	 * this will only be called from rmmod context
	 */
	if (vp_drv && !vp_drv->comp_del) {
		port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv);
		bfa_trc(bfad, roles);
		bfad_fc4_port_delete(bfad, port_drv, roles);
	}
}

void
bfa_fcb_port_online(struct bfad_s *bfad, enum bfa_port_role roles,
		    struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
{
	struct bfad_port_s *port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv);

	if (roles & BFA_PORT_ROLE_FCP_IM)
		bfad_im_port_online(bfad, port_drv);

	if (roles & BFA_PORT_ROLE_FCP_TM)
		bfad_tm_port_online(bfad, port_drv);

	if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
		bfad_ipfc_port_online(bfad, port_drv);

	bfad->bfad_flags |= BFAD_PORT_ONLINE;
}

void
bfa_fcb_port_offline(struct bfad_s *bfad, enum bfa_port_role roles,
		     struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
{
	struct bfad_port_s *port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv);

	if (roles & BFA_PORT_ROLE_FCP_IM)
		bfad_im_port_offline(bfad, port_drv);

	if (roles & BFA_PORT_ROLE_FCP_TM)
		bfad_tm_port_offline(bfad, port_drv);

	if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable)
		bfad_ipfc_port_offline(bfad, port_drv);
}

void
bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv)
{
	if (vport_drv->comp_del) {
		complete(vport_drv->comp_del);
		return;
	}

	kfree(vport_drv);
}

/**
 * FCS RPORT alloc callback, after successful PLOGI by FCS
 */
bfa_status_t
bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
		    struct bfad_rport_s **rport_drv)
{
	bfa_status_t rc = BFA_STATUS_OK;

	*rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
	if (*rport_drv == NULL) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	*rport = &(*rport_drv)->fcs_rport;

ext:
	return rc;
}

void
bfad_hal_mem_release(struct bfad_s *bfad)
{
	int i;
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		if (meminfo_elem->kva != NULL) {
			switch (meminfo_elem->mem_type) {
			case BFA_MEM_TYPE_KVA:
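				/*
				 * KVA regions were allocated with vmalloc()
				 * in bfad_hal_mem_alloc(); free with vfree().
				 */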
				vfree(meminfo_elem->kva);
				break;
			case BFA_MEM_TYPE_DMA:
				dma_free_coherent(&bfad->pcidev->dev,
						meminfo_elem->mem_len,
						meminfo_elem->kva,
						(dma_addr_t) meminfo_elem->dma);
				break;
			default:
				bfa_assert(0);
				break;
			}
		}
	}

	memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
}

void
bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
{
	if (num_rports > 0)
		bfa_cfg->fwcfg.num_rports = num_rports;
	if (num_ios > 0)
		bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
	if (num_tms > 0)
		bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
	if (num_fcxps > 0)
		bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
	if (num_ufbufs > 0)
		bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
	if (reqq_size > 0)
		bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
	if (rspq_size > 0)
		bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
	if (num_sgpgs > 0)
		bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;

	/*
	 * populate the hal values back to the driver for sysfs use.
	 * otherwise, the default values will be shown as 0 in sysfs
	 */
	num_rports = bfa_cfg->fwcfg.num_rports;
	num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
	num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
	num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
	num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
	reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
	rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
	num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
}

bfa_status_t
bfad_hal_mem_alloc(struct bfad_s *bfad)
{
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;
	bfa_status_t rc = BFA_STATUS_OK;
	dma_addr_t phys_addr;
	int retry_count = 0;
	int reset_value = 1;
	int min_num_sgpgs = 512;
	void *kva;
	int i;

	bfa_cfg_get_default(&bfad->ioc_cfg);

retry:
	bfad_update_hal_cfg(&bfad->ioc_cfg);
	bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
	bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo);

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		switch (meminfo_elem->mem_type) {
		case BFA_MEM_TYPE_KVA:
			kva = vmalloc(meminfo_elem->mem_len);
			if (kva == NULL) {
				bfad_hal_mem_release(bfad);
				rc = BFA_STATUS_ENOMEM;
				goto ext;
			}
			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			break;
		case BFA_MEM_TYPE_DMA:
			kva = dma_alloc_coherent(&bfad->pcidev->dev,
					meminfo_elem->mem_len,
					&phys_addr, GFP_KERNEL);
			if (kva == NULL) {
				bfad_hal_mem_release(bfad);
				/*
				 * If we cannot allocate with the default
				 * num_sgpgs, retry with half the value.
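				 * num_sgpgs is restored afterwards, either
				 * from num_sgpgs_parm or by undoing the
				 * halvings via (1 << retry_count).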
				 */
				if (num_sgpgs > min_num_sgpgs) {
					printk(KERN_INFO "bfad[%d]: memory"
						" allocation failed with"
						" num_sgpgs: %d\n",
						bfad->inst_no, num_sgpgs);
					nextLowerInt(&num_sgpgs);
					printk(KERN_INFO "bfad[%d]: trying to"
						" allocate memory with"
						" num_sgpgs: %d\n",
						bfad->inst_no, num_sgpgs);
					retry_count++;
					goto retry;
				} else {
					if (num_sgpgs_parm > 0)
						num_sgpgs = num_sgpgs_parm;
					else {
						reset_value =
							(1 << retry_count);
						num_sgpgs *= reset_value;
					}
					rc = BFA_STATUS_ENOMEM;
					goto ext;
				}
			}

			if (num_sgpgs_parm > 0)
				num_sgpgs = num_sgpgs_parm;
			else {
				reset_value = (1 << retry_count);
				num_sgpgs *= reset_value;
			}

			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			meminfo_elem->dma = phys_addr;
			break;
		default:
			break;
		}
	}
ext:
	return rc;
}

/**
 * Create a vport under a vf.
 */
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
		  struct bfa_port_cfg_s *port_cfg)
{
	struct bfad_vport_s *vport;
	int rc = BFA_STATUS_OK;
	unsigned long flags;
	struct completion fcomp;

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	vport->drv_port.bfad = bfad;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
				  port_cfg, vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (rc != BFA_STATUS_OK)
		goto ext_free_vport;

	if (port_cfg->roles & BFA_PORT_ROLE_FCP_IM) {
		rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port);
		if (rc != BFA_STATUS_OK)
			goto ext_free_fcs_vport;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_vport_start(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;

ext_free_fcs_vport:
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	vport->comp_del = &fcomp;
	init_completion(vport->comp_del);
	bfa_fcs_vport_delete(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(vport->comp_del);
ext_free_vport:
	kfree(vport);
ext:
	return rc;
}

/**
 * Create a vf and its base vport implicitly.
 */
bfa_status_t
bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
	       struct bfa_port_cfg_s *port_cfg)
{
	struct bfad_vf_s *vf;
	int rc = BFA_STATUS_OK;

	vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL);
	if (!vf) {
		rc = BFA_STATUS_FAILED;
		goto ext;
	}

	rc = bfa_fcs_vf_create(&vf->fcs_vf, &bfad->bfa_fcs, vf_id, port_cfg,
			       vf);
	if (rc != BFA_STATUS_OK)
		kfree(vf);
ext:
	return rc;
}

void
bfad_bfa_tmo(unsigned long data)
{
	struct bfad_s *bfad = (struct bfad_s *)data;
	unsigned long flags;
	struct list_head doneq;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	bfa_timer_tick(&bfad->bfa);

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}

void
bfad_init_timer(struct bfad_s *bfad)
{
	init_timer(&bfad->hal_tmo);
	bfad->hal_tmo.function = bfad_bfa_tmo;
	bfad->hal_tmo.data = (unsigned long)bfad;

	mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}

int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
	unsigned long bar0_len;
	int rc = -ENODEV;

	if (pci_enable_device(pdev)) {
		BFA_PRINTF(BFA_ERR, "pci_enable_device fail %p\n", pdev);
		goto out;
	}

	if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
		goto out_disable_device;

	pci_set_master(pdev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			BFA_PRINTF(BFA_ERR, "pci_set_dma_mask fail %p\n", pdev);
			goto out_release_region;
		}

	bfad->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0_len = pci_resource_len(pdev, 0);
	bfad->pci_bar0_kva = ioremap(bfad->pci_bar0_map, bar0_len);

	if (bfad->pci_bar0_kva == NULL) {
		BFA_PRINTF(BFA_ERR, "Fail to map bar0\n");
		goto out_release_region;
	}

	bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
	bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
	bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
	bfad->hal_pcidev.device_id = pdev->device;
	bfad->pci_name = pci_name(pdev);

	bfad->pci_attr.vendor_id = pdev->vendor;
	bfad->pci_attr.device_id = pdev->device;
	bfad->pci_attr.ssid = pdev->subsystem_device;
	bfad->pci_attr.ssvid = pdev->subsystem_vendor;
	bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);

	bfad->pcidev = pdev;
	return 0;

out_release_region:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return rc;
}

void
bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
{
#if defined(__ia64__)
	pci_iounmap(pdev, bfad->pci_bar0_kva);
#else
	iounmap(bfad->pci_bar0_kva);
#endif
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

void
bfad_fcs_port_cfg(struct bfad_s *bfad)
{
	struct bfa_port_cfg_s port_cfg;
	struct bfa_pport_attr_s attr;
	char symname[BFA_SYMNAME_MAXLEN];

	sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no);
	memcpy(port_cfg.sym_name.symname, symname, strlen(symname));
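	/* Seed the base port configuration with the physical port's WWNs. */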
	bfa_pport_get_attr(&bfad->bfa, &attr);
	port_cfg.nwwn = attr.nwwn;
	port_cfg.pwwn = attr.pwwn;

	bfa_fcs_cfg_base_port(&bfad->bfa_fcs, &port_cfg);
}

bfa_status_t
bfad_drv_init(struct bfad_s *bfad)
{
	bfa_status_t rc;
	unsigned long flags;
	struct bfa_fcs_driver_info_s driver_info;
	int i;

	bfad->cfg_data.rport_del_timeout = rport_del_timeout;
	bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
	bfad->cfg_data.io_max_sge = bfa_io_max_sge;
	bfad->cfg_data.binding_method = FCP_PWWN_BINDING;

	rc = bfad_hal_mem_alloc(bfad);
	if (rc != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
		       bfad->inst_no);
		printk(KERN_WARNING
		       "Not enough memory to attach all Brocade HBA ports;"
		       " the system may need more memory.\n");
		goto out_hal_mem_alloc_failure;
	}

	bfa_init_log(&bfad->bfa, bfad->logmod);
	bfa_init_trc(&bfad->bfa, bfad->trcmod);
	bfa_init_aen(&bfad->bfa, bfad->aen);
	INIT_LIST_HEAD(&bfad->file_q);
	INIT_LIST_HEAD(&bfad->file_free_q);
	for (i = 0; i < BFAD_AEN_MAX_APPS; i++) {
		bfa_q_qe_init(&bfad->file_buf[i].qe);
		list_add_tail(&bfad->file_buf[i].qe, &bfad->file_free_q);
	}
	bfa_init_plog(&bfad->bfa, &bfad->plog_buf);
	bfa_plog_init(&bfad->plog_buf);
	bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
		     0, "Driver Attach");

	bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
		   &bfad->hal_pcidev);

	init_completion(&bfad->comp);

	/*
	 * Enable interrupts and wait for bfa_init completion
	 */
	if (bfad_setup_intr(bfad)) {
		printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
		       bfad->inst_no);
		goto out_setup_intr_failure;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_init(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/*
	 * Set up an interrupt handler for each vector
	 */
	if ((bfad->bfad_flags & BFAD_MSIX_ON)
	    && bfad_install_msix_handler(bfad)) {
		printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
		       __func__, bfad->inst_no);
	}

	bfad_init_timer(bfad);

	wait_for_completion(&bfad->comp);

	memset(&driver_info, 0, sizeof(driver_info));
	strncpy(driver_info.version, BFAD_DRIVER_VERSION,
		sizeof(driver_info.version) - 1);
	if (host_name)
		strncpy(driver_info.host_machine_name, host_name,
			sizeof(driver_info.host_machine_name) - 1);
	if (os_name)
		strncpy(driver_info.host_os_name, os_name,
			sizeof(driver_info.host_os_name) - 1);
	if (os_patch)
		strncpy(driver_info.host_os_patch, os_patch,
			sizeof(driver_info.host_os_patch) - 1);

	strncpy(driver_info.os_device_name, bfad->pci_name,
		sizeof(driver_info.os_device_name) - 1);

	/*
	 * FCS INIT
	 */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_log_init(&bfad->bfa_fcs, bfad->logmod);
	bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod);
	bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen);
	bfa_fcs_init(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
	bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
	return BFA_STATUS_OK;

out_setup_intr_failure:
	bfa_detach(&bfad->bfa);
	bfad_hal_mem_release(bfad);
out_hal_mem_alloc_failure:
	return BFA_STATUS_FAILED;
}

void
bfad_drv_uninit(struct bfad_s *bfad)
{
	del_timer_sync(&bfad->hal_tmo);
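	/*
	 * Timer is stopped; quiesce interrupts, detach the HAL and
	 * release its memory.
	 */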
	bfa_isr_disable(&bfad->bfa);
	bfa_detach(&bfad->bfa);
	bfad_remove_intr(bfad);
	bfa_assert(list_empty(&bfad->file_q));
	bfad_hal_mem_release(bfad);
}

void
bfad_drv_start(struct bfad_s *bfad)
{
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_start(&bfad->bfa);
	bfa_fcs_start(&bfad->bfa_fcs);
	bfad->bfad_flags |= BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	bfad_fc4_probe_post(bfad);
}

void
bfad_drv_stop(struct bfad_s *bfad)
{
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfad->pport.flags |= BFAD_PORT_DELETE;
	bfa_fcs_exit(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_stop(&bfad->bfa);
	bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);
}

bfa_status_t
bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role)
{
	int rc = BFA_STATUS_OK;

	/*
	 * Allocate scsi_host for the physical port
	 */
	if ((bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IM)
	    && (role & BFA_PORT_ROLE_FCP_IM)) {
		if (bfad->pport.im_port == NULL) {
			rc = BFA_STATUS_FAILED;
			goto out;
		}

		rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port);
		if (rc != BFA_STATUS_OK)
			goto out;

		bfad->pport.roles |= BFA_PORT_ROLE_FCP_IM;
	}

	bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;

out:
	return rc;
}

void
bfad_uncfg_pport(struct bfad_s *bfad)
{
	if ((bfad->pport.roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) {
		bfad_ipfc_port_delete(bfad, &bfad->pport);
		bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IPFC;
	}

	if ((bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IM)
	    && (bfad->pport.roles & BFA_PORT_ROLE_FCP_IM)) {
		bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
		bfad_im_port_clean(bfad->pport.im_port);
		kfree(bfad->pport.im_port);
		bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IM;
	}

	bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
}

void
bfad_drv_log_level_set(struct bfad_s *bfad)
{
	if (log_level > BFA_LOG_INVALID && log_level <= BFA_LOG_LEVEL_MAX)
		bfa_log_set_level_all(&bfad->log_data, log_level);
}

/*
 * PCI driver entry points
 */

/**
 * PCI probe entry.
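 *
 * Allocates a bfad_s instance, sets up logging/tracing, maps BAR0,
 * initializes the HAL and FCS, and starts the driver.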
 */
int
bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct bfad_s *bfad;
	int error = -ENODEV, retval;
	char buf[16];

	/*
	 * For single port cards - only claim function 0
	 */
	if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P)
	    && (PCI_FUNC(pdev->devfn) != 0))
		return -ENODEV;

	BFA_TRACE(BFA_INFO, "bfad_pci_probe entry");

	bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
	if (!bfad) {
		error = -ENOMEM;
		goto out;
	}

	bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
	if (!bfad->trcmod) {
		printk(KERN_WARNING "Error alloc trace buffer!\n");
		error = -ENOMEM;
		goto out_alloc_trace_failure;
	}

	/*
	 * LOG/TRACE INIT
	 */
	bfa_trc_init(bfad->trcmod);
	bfa_trc(bfad, bfad_inst);

	bfad->logmod = &bfad->log_data;
	sprintf(buf, "%d", bfad_inst);
	bfa_log_init(bfad->logmod, buf, bfa_os_printf);

	bfad_drv_log_level_set(bfad);

	bfad->aen = &bfad->aen_buf;

	if (!(bfad_load_fwimg(pdev))) {
		printk(KERN_WARNING "bfad_load_fwimg failure!\n");
		kfree(bfad->trcmod);
		goto out_alloc_trace_failure;
	}

	retval = bfad_pci_init(pdev, bfad);
	if (retval) {
		printk(KERN_WARNING "bfad_pci_init failure!\n");
		error = retval;
		goto out_pci_init_failure;
	}

	mutex_lock(&bfad_mutex);
	bfad->inst_no = bfad_inst++;
	list_add_tail(&bfad->list_entry, &bfad_list);
	mutex_unlock(&bfad_mutex);

	spin_lock_init(&bfad->bfad_lock);
	pci_set_drvdata(pdev, bfad);

	bfad->ref_count = 0;
	bfad->pport.bfad = bfad;

	retval = bfad_drv_init(bfad);
	if (retval != BFA_STATUS_OK)
		goto out_drv_init_failure;
	if (!(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
		printk(KERN_WARNING "bfad%d: hal init failed\n", bfad->inst_no);
		goto ok;
	}

	/*
	 * PPORT FCS config
	 */
	bfad_fcs_port_cfg(bfad);

	retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM);
	if (retval != BFA_STATUS_OK)
		goto out_cfg_pport_failure;

	/*
	 * BFAD level FC4 (IM/TM/IPFC) specific resource allocation
	 */
	retval = bfad_fc4_probe(bfad);
	if (retval != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad_fc4_probe failed\n");
		goto out_fc4_probe_failure;
	}

	bfad_drv_start(bfad);

	/*
	 * If bfa_linkup_delay is set to -1 (the default), try to retrieve
	 * the value using bfad_os_get_linkup_delay(); otherwise use the
	 * passed-in module param value as the bfa_linkup_delay.
	 */
	if (bfa_linkup_delay < 0) {
		bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
		bfad_os_rport_online_wait(bfad);
		bfa_linkup_delay = -1;
	} else {
		bfad_os_rport_online_wait(bfad);
	}

	bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name);
ok:
	return 0;

out_fc4_probe_failure:
	bfad_fc4_probe_undo(bfad);
	bfad_uncfg_pport(bfad);
out_cfg_pport_failure:
	bfad_drv_uninit(bfad);
out_drv_init_failure:
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);
out_pci_init_failure:
	kfree(bfad->trcmod);
out_alloc_trace_failure:
	kfree(bfad);
out:
	return error;
}

/**
 * PCI remove entry.
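 *
 * Stops the driver, releases FC4 and pport resources, detaches the
 * HAL and frees the per-instance memory.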
 */
void
bfad_pci_remove(struct pci_dev *pdev)
{
	struct bfad_s *bfad = pci_get_drvdata(pdev);
	unsigned long flags;

	bfa_trc(bfad, bfad->inst_no);

	if ((bfad->bfad_flags & BFAD_DRV_INIT_DONE)
	    && !(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		init_completion(&bfad->comp);
		bfa_stop(&bfad->bfa);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		wait_for_completion(&bfad->comp);

		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		goto hal_detach;
	} else if (!(bfad->bfad_flags & BFAD_DRV_INIT_DONE)) {
		goto remove_sysfs;
	}

	if (bfad->bfad_flags & BFAD_HAL_START_DONE)
		bfad_drv_stop(bfad);

	bfad_remove_intr(bfad);

	del_timer_sync(&bfad->hal_tmo);
	bfad_fc4_probe_undo(bfad);

	if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
		bfad_uncfg_pport(bfad);

hal_detach:
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_detach(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfad_hal_mem_release(bfad);
remove_sysfs:

	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);

	kfree(bfad->trcmod);
	kfree(bfad);
}


static struct pci_device_id bfad_id_table[] = {
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G2P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G1P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},

	{0, 0},
};

MODULE_DEVICE_TABLE(pci, bfad_id_table);

static struct pci_driver bfad_pci_driver = {
	.name = BFAD_DRIVER_NAME,
	.id_table = bfad_id_table,
	.probe = bfad_pci_probe,
	.remove = __devexit_p(bfad_pci_remove),
};

/**
 * Linux driver module functions
 */
bfa_status_t
bfad_fc4_module_init(void)
{
	int rc;

	rc = bfad_im_module_init();
	if (rc != BFA_STATUS_OK)
		goto ext;

	bfad_tm_module_init();
	if (ipfc_enable)
		bfad_ipfc_module_init();
ext:
	return rc;
}

void
bfad_fc4_module_exit(void)
{
	if (ipfc_enable)
		bfad_ipfc_module_exit();
	bfad_tm_module_exit();
	bfad_im_module_exit();
}

/**
 * Driver module init.
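 *
 * Registers the FC4 sub-modules and the PCI driver.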
 */
static int __init
bfad_init(void)
{
	int error = 0;

	printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
	       BFAD_DRIVER_VERSION);

	if (num_sgpgs > 0)
		num_sgpgs_parm = num_sgpgs;

	error = bfad_fc4_module_init();
	if (error) {
		error = -ENOMEM;
		printk(KERN_WARNING "bfad_fc4_module_init failure\n");
		goto ext;
	}

	if (!strcmp(FCPI_NAME, " fcpim"))
		bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_IM;
	if (!strcmp(FCPT_NAME, " fcptm"))
		bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_TM;
	if (!strcmp(IPFC_NAME, " ipfc"))
		bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_IPFC;

	bfa_ioc_auto_recover(ioc_auto_recover);
	bfa_fcs_rport_set_del_timeout(rport_del_timeout);
	error = pci_register_driver(&bfad_pci_driver);

	if (error) {
		printk(KERN_WARNING "bfad pci_register_driver failure\n");
		goto ext;
	}

	return 0;

ext:
	bfad_fc4_module_exit();
	return error;
}

/**
 * Driver module exit.
 */
static void __exit
bfad_exit(void)
{
	pci_unregister_driver(&bfad_pci_driver);
	bfad_fc4_module_exit();
	bfad_free_fwimg();
}

#define BFAD_PROTO_NAME FCPI_NAME FCPT_NAME IPFC_NAME

module_init(bfad_init);
module_exit(bfad_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
MODULE_AUTHOR("Brocade Communications Systems, Inc.");
MODULE_VERSION(BFAD_DRIVER_VERSION);