/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>

#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "fnic_io.h"
#include "fnic_fip.h"
#include "fnic.h"

#define PCI_DEVICE_ID_CISCO_FNIC	0x0045

/* Timer to poll notification area for events. Used for MSI interrupts */
#define FNIC_NOTIFY_TIMER_PERIOD	(2 * HZ)

static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
static struct kmem_cache *fnic_io_req_cache;
LIST_HEAD(fnic_list);
DEFINE_SPINLOCK(fnic_list_lock);

/* Supported devices by fnic module */
static struct pci_device_id fnic_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
	{ 0, }
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Abhijeet Joglekar <abjoglek@cisco.com>, "
	      "Joseph R. Eykholt <jeykholt@cisco.com>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, fnic_id_table);

unsigned int fnic_log_level;
module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");

unsigned int fnic_trace_max_pages = 16;
module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
		 "for fnic trace buffer");

unsigned int fnic_fc_trace_max_pages = 64;
module_param(fnic_fc_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_fc_trace_max_pages,
		 "Total allocated memory pages for fc trace buffer");

static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
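
/*
 * Note: because the parameters above are registered with S_IRUGO|S_IWUSR,
 * they are normally visible under /sys/module/fnic/parameters/ (path
 * assumed from the standard module_param sysfs layout) and can be changed
 * at runtime by root, e.g.:
 *
 *	echo 0xff > /sys/module/fnic/parameters/fnic_log_level
 *
 * fnic_log_level is consulted on every log call, so such a change takes
 * effect immediately; the trace-page parameters are only consumed when the
 * trace buffers are set up.
 */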

static struct libfc_function_template fnic_transport_template = {
	.frame_send = fnic_send,
	.lport_set_port_id = fnic_set_port_id,
	.fcp_abort_io = fnic_empty_scsi_cleanup,
	.fcp_cleanup = fnic_empty_scsi_cleanup,
	.exch_mgr_reset = fnic_exch_mgr_reset
};

static int fnic_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	scsi_change_queue_depth(sdev, fnic_max_qdepth);
	return 0;
}

static struct scsi_host_template fnic_host_template = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.queuecommand = fnic_queuecommand,
	.eh_abort_handler = fnic_abort_cmd,
	.eh_device_reset_handler = fnic_device_reset,
	.eh_host_reset_handler = fnic_host_reset,
	.slave_alloc = fnic_slave_alloc,
	.change_queue_depth = scsi_change_queue_depth,
	.this_id = -1,
	.cmd_per_lun = 3,
	.can_queue = FNIC_DFLT_IO_REQ,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = FNIC_MAX_SG_DESC_CNT,
	.max_sectors = 0xffff,
	.shost_attrs = fnic_attrs,
	.use_blk_tags = 1,
	.track_queue_depth = 1,
};

static void
fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;
}

static void fnic_get_host_speed(struct Scsi_Host *shost);
static struct scsi_transport_template *fnic_fc_transport;
static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
static void fnic_reset_host_stats(struct Scsi_Host *);

static struct fc_function_template fnic_fc_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,
	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fnic_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,
	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.show_rport_dev_loss_tmo = 1,
	.set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
	.issue_fc_host_lip = fnic_reset,
	.get_fc_host_stats = fnic_get_stats,
	.reset_fc_host_stats = fnic_reset_host_stats,
	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
	.terminate_rport_io = fnic_terminate_rport_io,
	.bsg_request = fc_lport_bsg_request,
};

static void fnic_get_host_speed(struct Scsi_Host *shost)
{
	struct fc_lport *lp = shost_priv(shost);
	struct fnic *fnic = lport_priv(lp);
	u32 port_speed = vnic_dev_port_speed(fnic->vdev);

	/* Add in other values as they get defined in fw */
	switch (port_speed) {
	case 10000:
		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
		break;
	default:
		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
		break;
	}
}

static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
{
	int ret;
	struct fc_lport *lp = shost_priv(host);
	struct fnic *fnic = lport_priv(lp);
	struct fc_host_statistics *stats = &lp->host_stats;
	struct vnic_stats *vs;
	unsigned long flags;

	if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
		return stats;
	fnic->stats_time = jiffies;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (ret) {
		FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic: Get vnic stats failed"
			      " 0x%x", ret);
		return stats;
	}
	vs = fnic->stats;
	stats->tx_frames = vs->tx.tx_unicast_frames_ok;
	stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
	stats->rx_frames = vs->rx.rx_unicast_frames_ok;
	stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
	stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
	stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
	stats->invalid_crc_count = vs->rx.rx_crc_errors;
	stats->seconds_since_last_reset =
			(jiffies - fnic->stats_reset_time) / HZ;
	stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
	stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);

	return stats;
}

/*
 * fnic_dump_fchost_stats
 * note : dumps fc_statistics into system logs
 */
void fnic_dump_fchost_stats(struct Scsi_Host *host,
			    struct fc_host_statistics *stats)
{
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: seconds since last reset = %llu\n",
		       stats->seconds_since_last_reset);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: tx frames = %llu\n",
		       stats->tx_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: tx words = %llu\n",
		       stats->tx_words);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: rx frames = %llu\n",
		       stats->rx_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: rx words = %llu\n",
		       stats->rx_words);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: lip count = %llu\n",
		       stats->lip_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: nos count = %llu\n",
		       stats->nos_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: error frames = %llu\n",
		       stats->error_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: dumped frames = %llu\n",
		       stats->dumped_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: link failure count = %llu\n",
		       stats->link_failure_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: loss of sync count = %llu\n",
		       stats->loss_of_sync_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: loss of signal count = %llu\n",
		       stats->loss_of_signal_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: prim seq protocol err count = %llu\n",
		       stats->prim_seq_protocol_err_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: invalid tx word count= %llu\n",
		       stats->invalid_tx_word_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: invalid crc count = %llu\n",
		       stats->invalid_crc_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: fcp input requests = %llu\n",
		       stats->fcp_input_requests);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: fcp output requests = %llu\n",
		       stats->fcp_output_requests);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: fcp control requests = %llu\n",
		       stats->fcp_control_requests);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: fcp input megabytes = %llu\n",
		       stats->fcp_input_megabytes);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: fcp output megabytes = %llu\n",
		       stats->fcp_output_megabytes);
	return;
}

/*
 * fnic_reset_host_stats : clears host stats
 * note : called when reset_statistics set under sysfs dir
 */
static void fnic_reset_host_stats(struct Scsi_Host *host)
{
	int ret;
	struct fc_lport *lp = shost_priv(host);
	struct fnic *fnic = lport_priv(lp);
	struct fc_host_statistics *stats;
	unsigned long flags;

	/* dump current stats, before clearing them */
	stats = fnic_get_stats(host);
	fnic_dump_fchost_stats(host, stats);

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	ret = vnic_dev_stats_clear(fnic->vdev);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (ret) {
		FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic: Reset vnic stats failed"
			      " 0x%x", ret);
		return;
	}
	fnic->stats_reset_time = jiffies;
	memset(stats, 0, sizeof(*stats));

	return;
}

void fnic_log_q_error(struct fnic *fnic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		error_status = ioread32(&fnic->wq[i].ctrl->error_status);
		if (error_status)
			shost_printk(KERN_ERR, fnic->lport->host,
				     "WQ[%d] error_status"
				     " %d\n", i, error_status);
	}

	for (i = 0; i < fnic->rq_count; i++) {
		error_status = ioread32(&fnic->rq[i].ctrl->error_status);
		if (error_status)
			shost_printk(KERN_ERR, fnic->lport->host,
				     "RQ[%d] error_status"
				     " %d\n", i, error_status);
	}

	for (i = 0; i < fnic->wq_copy_count; i++) {
		error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status);
		if (error_status)
			shost_printk(KERN_ERR, fnic->lport->host,
				     "CWQ[%d] error_status"
				     " %d\n", i, error_status);
	}
}

void fnic_handle_link_event(struct fnic *fnic)
{
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	queue_work(fnic_event_queue, &fnic->link_work);
}

static int fnic_notify_set(struct fnic *fnic)
{
	int err;

	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		err = vnic_dev_notify_set(fnic->vdev, -1);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY);
		break;
	default:
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Interrupt mode should be set up"
			     " before devcmd notify set %d\n",
			     vnic_dev_get_intr_mode(fnic->vdev));
		err = -1;
		break;
	}

	return err;
}
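
/*
 * In MSI interrupt mode there is no dedicated notify interrupt, so the
 * firmware notification area is polled from a kernel timer instead:
 * fnic_notify_timer() handles any pending link event and re-arms itself
 * every FNIC_NOTIFY_TIMER_PERIOD, and fnic_notify_timer_start() only arms
 * the timer when the vNIC is in MSI mode (INTx and MSI-X are notified via
 * their interrupt vectors).
 */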
static void fnic_notify_timer(unsigned long data)
{
	struct fnic *fnic = (struct fnic *)data;

	fnic_handle_link_event(fnic);
	mod_timer(&fnic->notify_timer,
		  round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
}

static void fnic_fip_notify_timer(unsigned long data)
{
	struct fnic *fnic = (struct fnic *)data;

	fnic_handle_fip_timer(fnic);
}

static void fnic_notify_timer_start(struct fnic *fnic)
{
	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		/*
		 * Schedule first timeout immediately. The driver is
		 * initialized and ready to look for link up notification
		 */
		mod_timer(&fnic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}

static int fnic_dev_wait(struct vnic_dev *vdev,
			 int (*start)(struct vnic_dev *, int),
			 int (*finished)(struct vnic_dev *, int *),
			 int arg)
{
	unsigned long time;
	int done;
	int err;
	int count;

	count = 0;

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete.
	 * Sometimes schedule_timeout_uninterruptible() takes a long time
	 * to wake up, so we cannot rely on the 2-second window alone. The
	 * count check makes sure we poll at least three times before
	 * returning -ETIMEDOUT.
	 */
	time = jiffies + (HZ * 2);
	do {
		err = finished(vdev, &done);
		count++;
		if (err)
			return err;
		if (done)
			return 0;
		schedule_timeout_uninterruptible(HZ / 10);
	} while (time_after(time, jiffies) || (count < 3));

	return -ETIMEDOUT;
}

static int fnic_cleanup(struct fnic *fnic)
{
	unsigned int i;
	int err;

	vnic_dev_disable(fnic->vdev);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_mask(&fnic->intr[i]);

	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_disable(&fnic->rq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->raw_wq_count; i++) {
		err = vnic_wq_disable(&fnic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->wq_copy_count; i++) {
		err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
		if (err)
			return err;
	}

	/* Clean up completed IOs and FCS frames */
	fnic_wq_copy_cmpl_handler(fnic, -1);
	fnic_wq_cmpl_handler(fnic, -1);
	fnic_rq_cmpl_handler(fnic, -1);

	/* Clean up the IOs and FCS frames that have not completed */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_clean(&fnic->wq_copy[i],
				   fnic_wq_copy_cleanup_handler);

	for (i = 0; i < fnic->cq_count; i++)
		vnic_cq_clean(&fnic->cq[i]);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_clean(&fnic->intr[i]);

	mempool_destroy(fnic->io_req_pool);
	for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
		mempool_destroy(fnic->io_sgl_pool[i]);

	return 0;
}

static void fnic_iounmap(struct fnic *fnic)
{
	if (fnic->bar0.vaddr)
		iounmap(fnic->bar0.vaddr);
}
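
/*
 * The two helpers below are installed as callbacks from fnic_probe():
 * fnic_get_mac() becomes the FIP controller's get_src_addr() hook, and
 * fnic_set_vlan() (installed via fnic->set_vlan) programs the default
 * VLAN into the vNIC through vnic_dev_set_default_vlan().
 */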
/**
 * fnic_get_mac() - get assigned data MAC address for FIP code.
 * @lport: local port.
 */
static u8 *fnic_get_mac(struct fc_lport *lport)
{
	struct fnic *fnic = lport_priv(lport);

	return fnic->data_src_addr;
}

static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
{
	u16 old_vlan;
	old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
}

static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *host;
	struct fc_lport *lp;
	struct fnic *fnic;
	mempool_t *pool;
	int err;
	int i;
	unsigned long flags;

	/*
	 * Allocate SCSI Host and set up association between host,
	 * local port, and fnic
	 */
	lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
	if (!lp) {
		printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
		err = -ENOMEM;
		goto err_out;
	}
	host = lp->host;
	fnic = lport_priv(lp);
	fnic->lport = lp;
	fnic->ctlr.lp = lp;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
		 host->host_no);

	host->transportt = fnic_fc_transport;

	err = fnic_stats_debugfs_init(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to initialize debugfs for stats\n");
		fnic_stats_debugfs_remove(fnic);
	}

	/* Setup PCI resources */
	pci_set_drvdata(pdev, fnic);

	fnic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI device, aborting.\n");
		goto err_out_free_hba;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI resources, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device. Try 64-bit first, and
	 * fall back to 32-bit.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "No usable DMA configuration "
				     "aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 32-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 64-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "BAR0 not memory-map'able, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot memory-map BAR0 res hdr, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC registration failed, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC get MAC addr failed\n");
		goto err_out_dev_close;
	}
	/* set data_src for point-to-point mode and to keep it non-zero */
	memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);

	/* Get vNIC configuration */
	err = fnic_get_vnic_config(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Get vNIC configuration failed, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	/* Configure Maximum Outstanding IO reqs */
	if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
		host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
					max_t(u32, FNIC_MIN_IO_REQ,
					fnic->config.io_throttle_count));
	}
	fnic->fnic_max_tag_id = host->can_queue;

	err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to alloc shared tag map\n");
		goto err_out_dev_close;
	}

	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;
	host->max_cmd_len = FCOE_MAX_CMD_LEN;

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to set intr mode, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc vNIC resources, "
			     "aborting.\n");
		goto err_out_clear_intr;
	}


	/* initialize all fnic locks */
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	for (i = 0; i < FNIC_IO_LOCKS; i++)
		spin_lock_init(&fnic->io_req_lock[i]);

	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool)
		goto err_out_free_resources;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool)
		goto err_out_free_ioreq_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool)
		goto err_out_free_dflt_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;

	/* setup vlan config, hw inserts vlan header */
	fnic->vlan_hw_insert = 1;
	fnic->vlan_id = 0;

	/* Initialize the FIP fcoe_ctrl struct */
	fnic->ctlr.send = fnic_eth_send;
	fnic->ctlr.update_mac = fnic_update_mac;
	fnic->ctlr.get_src_addr = fnic_get_mac;
	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware supports FIP\n");
		/* enable directed and multicast */
		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
		fnic->set_vlan = fnic_set_vlan;
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
		setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
			    (unsigned long)fnic);
		spin_lock_init(&fnic->vlans_lock);
		INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
		INIT_WORK(&fnic->event_work, fnic_handle_event);
		skb_queue_head_init(&fnic->fip_frame_queue);
		INIT_LIST_HEAD(&fnic->evlist);
		INIT_LIST_HEAD(&fnic->vlans);
	} else {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware uses non-FIP mode\n");
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
		fnic->ctlr.state = FIP_ST_NON_FIP;
	}
	fnic->state = FNIC_IN_FC_MODE;

	atomic_set(&fnic->in_flight, 0);
	fnic->state_flags = FNIC_FLAGS_NONE;

	/* Enable hardware stripping of vlan header on ingress */
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);

	/* Setup notification buffer area */
	err = fnic_notify_set(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_max_pool;
	}

	/* Setup notify timer when using MSI interrupts */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		setup_timer(&fnic->notify_timer,
			    fnic_notify_timer, (unsigned long)fnic);

	/* allocate RQ buffers and post them to RQ */
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_alloc_rq_frame can't alloc "
				     "frame\n");
			goto err_out_free_rq_buf;
		}
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add host to SCSI
	 */
	err = scsi_add_host(lp->host, &pdev->dev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic: scsi_add_host failed...exiting\n");
		goto err_out_free_rq_buf;
	}

	/* Start local port initialization */

	lp->link_up = 0;

	lp->max_retry_count = fnic->config.flogi_retries;
	lp->max_rport_retry_count = fnic->config.plogi_retries;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_CONF_COMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		lp->service_params |= FCP_SPPF_RETRY;

	lp->boot_time = jiffies;
	lp->e_d_tov = fnic->config.ed_tov;
	lp->r_a_tov = fnic->config.ra_tov;
	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
	fc_set_wwnn(lp, fnic->config.node_wwn);
	fc_set_wwpn(lp, fnic->config.port_wwn);

	fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);

	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
			       FCPIO_HOST_EXCH_RANGE_END, NULL)) {
		err = -ENOMEM;
		goto err_out_remove_scsi_host;
	}

	fc_lport_init_stats(lp);
	fnic->stats_reset_time = jiffies;

	fc_lport_config(lp);

	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
		       sizeof(struct fc_frame_header))) {
		err = -EINVAL;
		goto err_out_free_exch_mgr;
	}
	fc_host_maxframe_size(lp->host) = lp->mfs;
	fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;

	sprintf(fc_host_symbolic_name(lp->host),
		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	skb_queue_head_init(&fnic->frame_queue);
	skb_queue_head_init(&fnic->tx_queue);

	/* Enable all queues */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_enable(&fnic->rq[i]);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->wq_copy[i]);

	fc_fabric_login(lp);

	vnic_dev_enable(fnic->vdev);

	err = fnic_request_intr(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to request irq.\n");
		goto err_out_free_exch_mgr;
	}

	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	fnic_notify_timer_start(fnic);

	return 0;

err_out_free_exch_mgr:
	fc_exch_mgr_free(lp);
err_out_remove_scsi_host:
	fc_remove_host(lp->host);
	scsi_remove_host(lp->host);
err_out_free_rq_buf:
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
	mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
	fnic_free_vnic_resources(fnic);
err_out_clear_intr:
	fnic_clear_intr_mode(fnic);
err_out_dev_close:
	vnic_dev_close(fnic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
	fnic_iounmap(fnic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_hba:
	fnic_stats_debugfs_remove(fnic);
	scsi_host_put(lp->host);
err_out:
	return err;
}
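
/*
 * Teardown in fnic_remove() mirrors probe in reverse: incoming link events
 * and frames are stopped first and the event workqueue is flushed, the
 * fabric is logged off so libfc flushes its rport/disc/lport work, the FIP
 * controller and lport are destroyed, fnic_cleanup() drains and cleans the
 * hardware queues, and only then are the interrupt, vNIC and PCI resources
 * released.
 */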
static void fnic_remove(struct pci_dev *pdev)
{
	struct fnic *fnic = pci_get_drvdata(pdev);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;

	/*
	 * Mark state so that the workqueue thread stops forwarding
	 * received frames and link events to the local port. ISR and
	 * other threads that can queue work items will also stop
	 * creating work items on the fnic workqueue
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->stop_rx_link_events = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		del_timer_sync(&fnic->notify_timer);

	/*
	 * Flush the fnic event queue. After this call, there should
	 * be no event queued for this fnic device in the workqueue
	 */
	flush_workqueue(fnic_event_queue);
	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		del_timer_sync(&fnic->fip_timer);
		skb_queue_purge(&fnic->fip_frame_queue);
		fnic_fcoe_reset_vlans(fnic);
		fnic_fcoe_evlist_free(fnic);
	}

	/*
	 * Log off the fabric. This stops all remote ports and the dns
	 * port, and flushes all rport, disc and lport work before
	 * returning
	 */
	fc_fabric_logoff(fnic->lport);

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->in_remove = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fcoe_ctlr_destroy(&fnic->ctlr);
	fc_lport_destroy(lp);
	fnic_stats_debugfs_remove(fnic);

	/*
	 * This stops the fnic device, masks all interrupts. Completed
	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
	 * cleaned up
	 */
	fnic_cleanup(fnic);

	BUG_ON(!skb_queue_empty(&fnic->frame_queue));
	BUG_ON(!skb_queue_empty(&fnic->tx_queue));

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_del(&fnic->list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	fc_remove_host(fnic->lport->host);
	scsi_remove_host(fnic->lport->host);
	fc_exch_mgr_free(fnic->lport);
	vnic_dev_notify_unset(fnic->vdev);
	fnic_free_intr(fnic);
	fnic_free_vnic_resources(fnic);
	fnic_clear_intr_mode(fnic);
	vnic_dev_close(fnic->vdev);
	vnic_dev_unregister(fnic->vdev);
	fnic_iounmap(fnic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	scsi_host_put(lp->host);
}

static struct pci_driver fnic_driver = {
	.name = DRV_NAME,
	.id_table = fnic_id_table,
	.probe = fnic_probe,
	.remove = fnic_remove,
};

static int __init fnic_init_module(void)
{
	size_t len;
	int err = 0;

	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	/* Create debugfs entries for fnic */
	err = fnic_debugfs_init();
	if (err < 0) {
		printk(KERN_ERR PFX "Failed to create fnic directory "
		       "for tracing and stats logging\n");
		fnic_debugfs_terminate();
	}

	/* Allocate memory for trace buffer */
	err = fnic_trace_buf_init();
	if (err < 0) {
		printk(KERN_ERR PFX
		       "Trace buffer initialization Failed. "
		       "Fnic Tracing utility is disabled\n");
		fnic_trace_free();
	}

	/* Allocate memory for fc trace buffer */
	err = fnic_fc_trace_init();
	if (err < 0) {
		printk(KERN_ERR PFX "FC trace buffer initialization Failed "
		       "FC frame tracing utility is disabled\n");
		fnic_fc_trace_free();
	}

	/* Create a cache for allocation of default size sgls */
	len = sizeof(struct fnic_dflt_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
		("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN,
		 NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
		printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
		err = -ENOMEM;
		goto err_create_fnic_sgl_slab_dflt;
	}

	/* Create a cache for allocation of max size sgls */
	len = sizeof(struct fnic_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
		("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN,
		 NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
		printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
		err = -ENOMEM;
		goto err_create_fnic_sgl_slab_max;
	}

	/* Create a cache of io_req structs for use via mempool */
	fnic_io_req_cache = kmem_cache_create("fnic_io_req",
					      sizeof(struct fnic_io_req),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fnic_io_req_cache) {
		printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
		err = -ENOMEM;
		goto err_create_fnic_ioreq_slab;
	}

	fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
	if (!fnic_event_queue) {
		printk(KERN_ERR PFX "fnic work queue create failed\n");
		err = -ENOMEM;
		goto err_create_fnic_workq;
	}

	spin_lock_init(&fnic_list_lock);
	INIT_LIST_HEAD(&fnic_list);

	fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
	if (!fnic_fip_queue) {
		printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
		err = -ENOMEM;
		goto err_create_fip_workq;
	}

	fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
	if (!fnic_fc_transport) {
		printk(KERN_ERR PFX "fc_attach_transport error\n");
		err = -ENOMEM;
		goto err_fc_transport;
	}

	/* register the driver with PCI system */
	err = pci_register_driver(&fnic_driver);
	if (err < 0) {
		printk(KERN_ERR PFX "pci register error\n");
		goto err_pci_register;
	}
	return err;

err_pci_register:
	fc_release_transport(fnic_fc_transport);
err_fc_transport:
	destroy_workqueue(fnic_fip_queue);
err_create_fip_workq:
	destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
	kmem_cache_destroy(fnic_io_req_cache);
err_create_fnic_ioreq_slab:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
err_create_fnic_sgl_slab_max:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
	fnic_trace_free();
	fnic_fc_trace_free();
	fnic_debugfs_terminate();
	return err;
}

static void __exit fnic_cleanup_module(void)
{
	pci_unregister_driver(&fnic_driver);
	destroy_workqueue(fnic_event_queue);
	if (fnic_fip_queue) {
		flush_workqueue(fnic_fip_queue);
		destroy_workqueue(fnic_fip_queue);
	}
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	kmem_cache_destroy(fnic_io_req_cache);
	fc_release_transport(fnic_fc_transport);
	fnic_trace_free();
	fnic_fc_trace_free();
	fnic_debugfs_terminate();
}

module_init(fnic_init_module);
module_exit(fnic_cleanup_module);