/*
 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "snic.h"
#include "snic_fwint.h"

#define PCI_DEVICE_ID_CISCO_SNIC	0x0046

/* Supported devices by snic module */
static struct pci_device_id snic_id_table[] = {
	{PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
	{ 0, }	/* end of table */
};

/* Bitmask of enabled snic log levels; writable at runtime via sysfs. */
unsigned int snic_log_level = 0x0;
/* NOTE(review): the variable is unsigned int but is registered with the
 * 'int' param type, unlike the 'uint' params below — confirm this passes
 * __param_check on the target kernel. */
module_param(snic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels");

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
/* Number of pages reserved for the debugfs trace buffer (snic_trc_init). */
unsigned int snic_trace_max_pages = 16;
module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_trace_max_pages,
		"Total allocated memory pages for snic trace buffer");

#endif
/* Per-LUN queue depth reported to the SCSI midlayer in slave_configure,
 * clamped to SNIC_MAX_QUEUE_DEPTH. */
unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH;
module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN"); 57 58 /* 59 * snic_slave_alloc : callback function to SCSI Mid Layer, called on 60 * scsi device initialization. 61 */ 62 static int 63 snic_slave_alloc(struct scsi_device *sdev) 64 { 65 struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev)); 66 67 if (!tgt || snic_tgt_chkready(tgt)) 68 return -ENXIO; 69 70 return 0; 71 } 72 73 /* 74 * snic_slave_configure : callback function to SCSI Mid Layer, called on 75 * scsi device initialization. 76 */ 77 static int 78 snic_slave_configure(struct scsi_device *sdev) 79 { 80 struct snic *snic = shost_priv(sdev->host); 81 u32 qdepth = 0, max_ios = 0; 82 int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ; 83 84 /* Set Queue Depth */ 85 max_ios = snic_max_qdepth; 86 qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH); 87 scsi_change_queue_depth(sdev, qdepth); 88 89 if (snic->fwinfo.io_tmo > 1) 90 tmo = snic->fwinfo.io_tmo * HZ; 91 92 /* FW requires extended timeouts */ 93 blk_queue_rq_timeout(sdev->request_queue, tmo); 94 95 return 0; 96 } 97 98 static int 99 snic_change_queue_depth(struct scsi_device *sdev, int qdepth) 100 { 101 struct snic *snic = shost_priv(sdev->host); 102 int qsz = 0; 103 104 qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH); 105 if (qsz < sdev->queue_depth) 106 atomic64_inc(&snic->s_stats.misc.qsz_rampdown); 107 else if (qsz > sdev->queue_depth) 108 atomic64_inc(&snic->s_stats.misc.qsz_rampup); 109 110 atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth); 111 112 scsi_change_queue_depth(sdev, qsz); 113 114 return sdev->queue_depth; 115 } 116 117 static struct scsi_host_template snic_host_template = { 118 .module = THIS_MODULE, 119 .name = SNIC_DRV_NAME, 120 .queuecommand = snic_queuecommand, 121 .eh_abort_handler = snic_abort_cmd, 122 .eh_device_reset_handler = snic_device_reset, 123 .eh_host_reset_handler = snic_host_reset, 124 .slave_alloc = snic_slave_alloc, 125 .slave_configure = snic_slave_configure, 126 .change_queue_depth = 
snic_change_queue_depth, 127 .this_id = -1, 128 .cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH, 129 .can_queue = SNIC_MAX_IO_REQ, 130 .use_clustering = ENABLE_CLUSTERING, 131 .sg_tablesize = SNIC_MAX_SG_DESC_CNT, 132 .max_sectors = 0x800, 133 .shost_attrs = snic_attrs, 134 .track_queue_depth = 1, 135 .cmd_size = sizeof(struct snic_internal_io_state), 136 .proc_name = "snic_scsi", 137 }; 138 139 /* 140 * snic_handle_link_event : Handles link events such as link up/down/error 141 */ 142 void 143 snic_handle_link_event(struct snic *snic) 144 { 145 unsigned long flags; 146 147 spin_lock_irqsave(&snic->snic_lock, flags); 148 if (snic->stop_link_events) { 149 spin_unlock_irqrestore(&snic->snic_lock, flags); 150 151 return; 152 } 153 spin_unlock_irqrestore(&snic->snic_lock, flags); 154 155 queue_work(snic_glob->event_q, &snic->link_work); 156 } /* end of snic_handle_link_event */ 157 158 /* 159 * snic_notify_set : sets notification area 160 * This notification area is to receive events from fw 161 * Note: snic supports only MSIX interrupts, in which we can just call 162 * svnic_dev_notify_set directly 163 */ 164 static int 165 snic_notify_set(struct snic *snic) 166 { 167 int ret = 0; 168 enum vnic_dev_intr_mode intr_mode; 169 170 intr_mode = svnic_dev_get_intr_mode(snic->vdev); 171 172 if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) { 173 ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY); 174 } else { 175 SNIC_HOST_ERR(snic->shost, 176 "Interrupt mode should be setup before devcmd notify set %d\n", 177 intr_mode); 178 ret = -1; 179 } 180 181 return ret; 182 } /* end of snic_notify_set */ 183 184 /* 185 * snic_dev_wait : polls vnic open status. 
186 */ 187 static int 188 snic_dev_wait(struct vnic_dev *vdev, 189 int (*start)(struct vnic_dev *, int), 190 int (*finished)(struct vnic_dev *, int *), 191 int arg) 192 { 193 unsigned long time; 194 int ret, done; 195 int retry_cnt = 0; 196 197 ret = start(vdev, arg); 198 if (ret) 199 return ret; 200 201 /* 202 * Wait for func to complete...2 seconds max. 203 * 204 * Sometimes schedule_timeout_uninterruptible take long time 205 * to wakeup, which results skipping retry. The retry counter 206 * ensures to retry at least two times. 207 */ 208 time = jiffies + (HZ * 2); 209 do { 210 ret = finished(vdev, &done); 211 if (ret) 212 return ret; 213 214 if (done) 215 return 0; 216 schedule_timeout_uninterruptible(HZ/10); 217 ++retry_cnt; 218 } while (time_after(time, jiffies) || (retry_cnt < 3)); 219 220 return -ETIMEDOUT; 221 } /* end of snic_dev_wait */ 222 223 /* 224 * snic_cleanup: called by snic_remove 225 * Stops the snic device, masks all interrupts, Completed CQ entries are 226 * drained. 
 * Posted WQ/RQ/Copy-WQ entries are cleanup
 */
static int
snic_cleanup(struct snic *snic)
{
	unsigned int i;
	int ret;

	/* Stop the device and silence every interrupt source first so no
	 * new completions arrive while draining. */
	svnic_dev_disable(snic->vdev);
	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	/* WQ disable can fail; propagate the error immediately — the
	 * remaining teardown below is intentionally skipped in that case. */
	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_wq_disable(&snic->wq[i]);
		if (ret)
			return ret;
	}

	/* Clean up completed IOs */
	snic_fwcq_cmpl_handler(snic, -1);

	snic_wq_cmpl_handler(snic, -1);

	/* Clean up the IOs that have not completed */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);

	for (i = 0; i < snic->cq_count; i++)
		svnic_cq_clean(&snic->cq[i]);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_clean(&snic->intr[i]);

	/* Cleanup snic specific requests */
	snic_free_all_untagged_reqs(snic);

	/* Cleanup Pending SCSI commands */
	snic_shutdown_scsi_cleanup(snic);

	/* NOTE(review): assumes mempool_destroy() tolerates NULL pools —
	 * true on current kernels; verify for the minimum supported one. */
	for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
		mempool_destroy(snic->req_pool[i]);

	return 0;
} /* end of snic_cleanup */


/* Unmap BAR0, if snic_probe() managed to map it. */
static void
snic_iounmap(struct snic *snic)
{
	if (snic->bar0.vaddr)
		iounmap(snic->bar0.vaddr);
}

/*
 * snic_vdev_open_done : polls for svnic_dev_open cmd completion.
281 */ 282 static int 283 snic_vdev_open_done(struct vnic_dev *vdev, int *done) 284 { 285 struct snic *snic = svnic_dev_priv(vdev); 286 int ret; 287 int nretries = 5; 288 289 do { 290 ret = svnic_dev_open_done(vdev, done); 291 if (ret == 0) 292 break; 293 294 SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n"); 295 } while (nretries--); 296 297 return ret; 298 } /* end of snic_vdev_open_done */ 299 300 /* 301 * snic_add_host : registers scsi host with ML 302 */ 303 static int 304 snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev) 305 { 306 int ret = 0; 307 308 ret = scsi_add_host(shost, &pdev->dev); 309 if (ret) { 310 SNIC_HOST_ERR(shost, 311 "snic: scsi_add_host failed. %d\n", 312 ret); 313 314 return ret; 315 } 316 317 SNIC_BUG_ON(shost->work_q != NULL); 318 snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d", 319 shost->host_no); 320 shost->work_q = create_singlethread_workqueue(shost->work_q_name); 321 if (!shost->work_q) { 322 SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n"); 323 324 ret = -ENOMEM; 325 } 326 327 return ret; 328 } /* end of snic_add_host */ 329 330 static void 331 snic_del_host(struct Scsi_Host *shost) 332 { 333 if (!shost->work_q) 334 return; 335 336 destroy_workqueue(shost->work_q); 337 shost->work_q = NULL; 338 scsi_remove_host(shost); 339 } 340 341 int 342 snic_get_state(struct snic *snic) 343 { 344 return atomic_read(&snic->state); 345 } 346 347 void 348 snic_set_state(struct snic *snic, enum snic_state state) 349 { 350 SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n", 351 snic_state_to_str(snic_get_state(snic)), 352 snic_state_to_str(state)); 353 354 atomic_set(&snic->state, state); 355 } 356 357 /* 358 * snic_probe : Initialize the snic interface. 
359 */ 360 static int 361 snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 362 { 363 struct Scsi_Host *shost; 364 struct snic *snic; 365 mempool_t *pool; 366 unsigned long flags; 367 u32 max_ios = 0; 368 int ret, i; 369 370 /* Device Information */ 371 SNIC_INFO("snic device %4x:%4x:%4x:%4x: ", 372 pdev->vendor, pdev->device, pdev->subsystem_vendor, 373 pdev->subsystem_device); 374 375 SNIC_INFO("snic device bus %x: slot %x: fn %x\n", 376 pdev->bus->number, PCI_SLOT(pdev->devfn), 377 PCI_FUNC(pdev->devfn)); 378 379 /* 380 * Allocate SCSI Host and setup association between host, and snic 381 */ 382 shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic)); 383 if (!shost) { 384 SNIC_ERR("Unable to alloc scsi_host\n"); 385 ret = -ENOMEM; 386 387 goto prob_end; 388 } 389 snic = shost_priv(shost); 390 snic->shost = shost; 391 392 snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME, 393 shost->host_no); 394 395 SNIC_HOST_INFO(shost, 396 "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n", 397 shost->host_no, snic, shost, pdev->bus->number, 398 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 399 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 400 /* Per snic debugfs init */ 401 ret = snic_stats_debugfs_init(snic); 402 if (ret) { 403 SNIC_HOST_ERR(snic->shost, 404 "Failed to initialize debugfs stats\n"); 405 snic_stats_debugfs_remove(snic); 406 } 407 #endif 408 409 /* Setup PCI Resources */ 410 pci_set_drvdata(pdev, snic); 411 snic->pdev = pdev; 412 413 ret = pci_enable_device(pdev); 414 if (ret) { 415 SNIC_HOST_ERR(shost, 416 "Cannot enable PCI Resources, aborting : %d\n", 417 ret); 418 419 goto err_free_snic; 420 } 421 422 ret = pci_request_regions(pdev, SNIC_DRV_NAME); 423 if (ret) { 424 SNIC_HOST_ERR(shost, 425 "Cannot obtain PCI Resources, aborting : %d\n", 426 ret); 427 428 goto err_pci_disable; 429 } 430 431 pci_set_master(pdev); 432 433 /* 434 * Query PCI Controller on system for DMA addressing 435 * limitation for the device. 
Try 43-bit first, and 436 * fail to 32-bit. 437 */ 438 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43)); 439 if (ret) { 440 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 441 if (ret) { 442 SNIC_HOST_ERR(shost, 443 "No Usable DMA Configuration, aborting %d\n", 444 ret); 445 446 goto err_rel_regions; 447 } 448 449 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 450 if (ret) { 451 SNIC_HOST_ERR(shost, 452 "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n", 453 ret); 454 455 goto err_rel_regions; 456 } 457 } else { 458 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43)); 459 if (ret) { 460 SNIC_HOST_ERR(shost, 461 "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n", 462 ret); 463 464 goto err_rel_regions; 465 } 466 } 467 468 469 /* Map vNIC resources from BAR0 */ 470 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 471 SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n"); 472 473 ret = -ENODEV; 474 goto err_rel_regions; 475 } 476 477 snic->bar0.vaddr = pci_iomap(pdev, 0, 0); 478 if (!snic->bar0.vaddr) { 479 SNIC_HOST_ERR(shost, 480 "Cannot memory map BAR0 res hdr aborting.\n"); 481 482 ret = -ENODEV; 483 goto err_rel_regions; 484 } 485 486 snic->bar0.bus_addr = pci_resource_start(pdev, 0); 487 snic->bar0.len = pci_resource_len(pdev, 0); 488 SNIC_BUG_ON(snic->bar0.bus_addr == 0); 489 490 /* Devcmd2 Resource Allocation and Initialization */ 491 snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1); 492 if (!snic->vdev) { 493 SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n"); 494 495 ret = -ENODEV; 496 goto err_iounmap; 497 } 498 499 ret = svnic_dev_cmd_init(snic->vdev, 0); 500 if (ret) { 501 SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret); 502 503 goto err_vnic_unreg; 504 } 505 506 ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0); 507 if (ret) { 508 SNIC_HOST_ERR(shost, 509 "vNIC dev open failed, aborting. 
%d\n", 510 ret); 511 512 goto err_vnic_unreg; 513 } 514 515 ret = svnic_dev_init(snic->vdev, 0); 516 if (ret) { 517 SNIC_HOST_ERR(shost, 518 "vNIC dev init failed. aborting. %d\n", 519 ret); 520 521 goto err_dev_close; 522 } 523 524 /* Get vNIC information */ 525 ret = snic_get_vnic_config(snic); 526 if (ret) { 527 SNIC_HOST_ERR(shost, 528 "Get vNIC configuration failed, aborting. %d\n", 529 ret); 530 531 goto err_dev_close; 532 } 533 534 /* Configure Maximum Outstanding IO reqs */ 535 max_ios = snic->config.io_throttle_count; 536 if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD) 537 shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ, 538 max_t(u32, SNIC_MIN_IO_REQ, max_ios)); 539 540 snic->max_tag_id = shost->can_queue; 541 542 shost->max_lun = snic->config.luns_per_tgt; 543 shost->max_id = SNIC_MAX_TARGET; 544 545 shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/ 546 547 snic_get_res_counts(snic); 548 549 /* 550 * Assumption: Only MSIx is supported 551 */ 552 ret = snic_set_intr_mode(snic); 553 if (ret) { 554 SNIC_HOST_ERR(shost, 555 "Failed to set intr mode aborting. %d\n", 556 ret); 557 558 goto err_dev_close; 559 } 560 561 ret = snic_alloc_vnic_res(snic); 562 if (ret) { 563 SNIC_HOST_ERR(shost, 564 "Failed to alloc vNIC resources aborting. 
%d\n", 565 ret); 566 567 goto err_clear_intr; 568 } 569 570 /* Initialize specific lists */ 571 INIT_LIST_HEAD(&snic->list); 572 573 /* 574 * spl_cmd_list for maintaining snic specific cmds 575 * such as EXCH_VER_REQ, REPORT_TARGETS etc 576 */ 577 INIT_LIST_HEAD(&snic->spl_cmd_list); 578 spin_lock_init(&snic->spl_cmd_lock); 579 580 /* initialize all snic locks */ 581 spin_lock_init(&snic->snic_lock); 582 583 for (i = 0; i < SNIC_WQ_MAX; i++) 584 spin_lock_init(&snic->wq_lock[i]); 585 586 for (i = 0; i < SNIC_IO_LOCKS; i++) 587 spin_lock_init(&snic->io_req_lock[i]); 588 589 pool = mempool_create_slab_pool(2, 590 snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); 591 if (!pool) { 592 SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n"); 593 594 goto err_free_res; 595 } 596 597 snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool; 598 599 pool = mempool_create_slab_pool(2, 600 snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); 601 if (!pool) { 602 SNIC_HOST_ERR(shost, "max sgl pool creation failed\n"); 603 604 goto err_free_dflt_sgl_pool; 605 } 606 607 snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool; 608 609 pool = mempool_create_slab_pool(2, 610 snic_glob->req_cache[SNIC_REQ_TM_CACHE]); 611 if (!pool) { 612 SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n"); 613 614 goto err_free_max_sgl_pool; 615 } 616 617 snic->req_pool[SNIC_REQ_TM_CACHE] = pool; 618 619 /* Initialize snic state */ 620 atomic_set(&snic->state, SNIC_INIT); 621 622 atomic_set(&snic->ios_inflight, 0); 623 624 /* Setup notification buffer area */ 625 ret = snic_notify_set(snic); 626 if (ret) { 627 SNIC_HOST_ERR(shost, 628 "Failed to alloc notify buffer aborting. 
%d\n", 629 ret); 630 631 goto err_free_tmreq_pool; 632 } 633 634 spin_lock_irqsave(&snic_glob->snic_list_lock, flags); 635 list_add_tail(&snic->list, &snic_glob->snic_list); 636 spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags); 637 638 snic_disc_init(&snic->disc); 639 INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc); 640 INIT_WORK(&snic->disc_work, snic_handle_disc); 641 INIT_WORK(&snic->link_work, snic_handle_link); 642 643 /* Enable all queues */ 644 for (i = 0; i < snic->wq_count; i++) 645 svnic_wq_enable(&snic->wq[i]); 646 647 ret = svnic_dev_enable_wait(snic->vdev); 648 if (ret) { 649 SNIC_HOST_ERR(shost, 650 "vNIC dev enable failed w/ error %d\n", 651 ret); 652 653 goto err_vdev_enable; 654 } 655 656 ret = snic_request_intr(snic); 657 if (ret) { 658 SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret); 659 660 goto err_req_intr; 661 } 662 663 for (i = 0; i < snic->intr_count; i++) 664 svnic_intr_unmask(&snic->intr[i]); 665 666 /* Get snic params */ 667 ret = snic_get_conf(snic); 668 if (ret) { 669 SNIC_HOST_ERR(shost, 670 "Failed to get snic io config from FW w err %d\n", 671 ret); 672 673 goto err_get_conf; 674 } 675 676 /* 677 * Initialization done with PCI system, hardware, firmware. 678 * Add shost to SCSI 679 */ 680 ret = snic_add_host(shost, pdev); 681 if (ret) { 682 SNIC_HOST_ERR(shost, 683 "Adding scsi host Failed ... exiting. 
%d\n", 684 ret); 685 686 goto err_get_conf; 687 } 688 689 snic_set_state(snic, SNIC_ONLINE); 690 691 ret = snic_disc_start(snic); 692 if (ret) { 693 SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n", 694 ret); 695 696 goto err_get_conf; 697 } 698 699 SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n"); 700 701 return 0; 702 703 err_get_conf: 704 snic_free_all_untagged_reqs(snic); 705 706 for (i = 0; i < snic->intr_count; i++) 707 svnic_intr_mask(&snic->intr[i]); 708 709 snic_free_intr(snic); 710 711 err_req_intr: 712 svnic_dev_disable(snic->vdev); 713 714 err_vdev_enable: 715 svnic_dev_notify_unset(snic->vdev); 716 717 for (i = 0; i < snic->wq_count; i++) { 718 int rc = 0; 719 720 rc = svnic_wq_disable(&snic->wq[i]); 721 if (rc) { 722 SNIC_HOST_ERR(shost, 723 "WQ Disable Failed w/ err = %d\n", rc); 724 725 break; 726 } 727 } 728 snic_del_host(snic->shost); 729 730 err_free_tmreq_pool: 731 mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]); 732 733 err_free_max_sgl_pool: 734 mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]); 735 736 err_free_dflt_sgl_pool: 737 mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]); 738 739 err_free_res: 740 snic_free_vnic_res(snic); 741 742 err_clear_intr: 743 snic_clear_intr_mode(snic); 744 745 err_dev_close: 746 svnic_dev_close(snic->vdev); 747 748 err_vnic_unreg: 749 svnic_dev_unregister(snic->vdev); 750 751 err_iounmap: 752 snic_iounmap(snic); 753 754 err_rel_regions: 755 pci_release_regions(pdev); 756 757 err_pci_disable: 758 pci_disable_device(pdev); 759 760 err_free_snic: 761 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 762 snic_stats_debugfs_remove(snic); 763 #endif 764 scsi_host_put(shost); 765 pci_set_drvdata(pdev, NULL); 766 767 prob_end: 768 SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n", 769 pdev->bus->number, PCI_SLOT(pdev->devfn), 770 PCI_FUNC(pdev->devfn)); 771 772 return ret; 773 } /* end of snic_probe */ 774 775 776 /* 777 * snic_remove : invoked on unbinding the interface to 
 * cleanup the resources allocated in snic_probe on initialization.
 */
static void
snic_remove(struct pci_dev *pdev)
{
	struct snic *snic = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!snic) {
		SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
			  pdev->bus->number, PCI_SLOT(pdev->devfn),
			  PCI_FUNC(pdev->devfn));

		return;
	}

	/*
	 * Mark state so that the workqueue thread stops forwarding
	 * received frames and link events. ISR and other threads
	 * that can queue work items will also stop creating work
	 * items on the snic workqueue
	 */
	snic_set_state(snic, SNIC_OFFLINE);
	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->stop_link_events = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/* Drain already-queued work before tearing down discovery. */
	flush_workqueue(snic_glob->event_q);
	snic_disc_term(snic);

	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->in_remove = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/*
	 * This stops the snic device, masks all interrupts, Completed
	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
	 * cleanup
	 */
	snic_cleanup(snic);

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_del(&snic->list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_tgt_del_all(snic);
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	snic_del_host(snic->shost);

	/* Release hardware resources in reverse order of snic_probe. */
	svnic_dev_notify_unset(snic->vdev);
	snic_free_intr(snic);
	snic_free_vnic_res(snic);
	snic_clear_intr_mode(snic);
	svnic_dev_close(snic->vdev);
	svnic_dev_unregister(snic->vdev);
	snic_iounmap(snic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	/* this frees Scsi_Host and snic memory (continuous chunk) */
	scsi_host_put(snic->shost);
} /* end of snic_remove */


/* Single global context shared by all snic instances. */
struct snic_global *snic_glob;

/*
 * snic_global_data_init: Initialize SNIC Global Data
 * Notes: All the global lists, variables should be part of global data
 * this helps in debugging.
851 */ 852 static int 853 snic_global_data_init(void) 854 { 855 int ret = 0; 856 struct kmem_cache *cachep; 857 ssize_t len = 0; 858 859 snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL); 860 861 if (!snic_glob) { 862 SNIC_ERR("Failed to allocate Global Context.\n"); 863 864 ret = -ENOMEM; 865 goto gdi_end; 866 } 867 868 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 869 /* Debugfs related Initialization */ 870 /* Create debugfs entries for snic */ 871 ret = snic_debugfs_init(); 872 if (ret < 0) { 873 SNIC_ERR("Failed to create sysfs dir for tracing and stats.\n"); 874 snic_debugfs_term(); 875 /* continue even if it fails */ 876 } 877 878 /* Trace related Initialization */ 879 /* Allocate memory for trace buffer */ 880 ret = snic_trc_init(); 881 if (ret < 0) { 882 SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n"); 883 snic_trc_free(); 884 /* continue even if it fails */ 885 } 886 887 #endif 888 INIT_LIST_HEAD(&snic_glob->snic_list); 889 spin_lock_init(&snic_glob->snic_list_lock); 890 891 /* Create a cache for allocation of snic_host_req+default size ESGLs */ 892 len = sizeof(struct snic_req_info); 893 len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl); 894 cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN, 895 SLAB_HWCACHE_ALIGN, NULL); 896 if (!cachep) { 897 SNIC_ERR("Failed to create snic default sgl slab\n"); 898 ret = -ENOMEM; 899 900 goto err_dflt_req_slab; 901 } 902 snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep; 903 904 /* Create a cache for allocation of max size Extended SGLs */ 905 len = sizeof(struct snic_req_info); 906 len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl); 907 cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN, 908 SLAB_HWCACHE_ALIGN, NULL); 909 if (!cachep) { 910 SNIC_ERR("Failed to create snic max sgl slab\n"); 911 ret = -ENOMEM; 912 913 goto err_max_req_slab; 914 } 915 snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep; 916 917 len = sizeof(struct 
snic_host_req); 918 cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN, 919 SLAB_HWCACHE_ALIGN, NULL); 920 if (!cachep) { 921 SNIC_ERR("Failed to create snic tm req slab\n"); 922 ret = -ENOMEM; 923 924 goto err_tmreq_slab; 925 } 926 snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep; 927 928 /* snic_event queue */ 929 snic_glob->event_q = create_singlethread_workqueue("snic_event_wq"); 930 if (!snic_glob->event_q) { 931 SNIC_ERR("snic event queue create failed\n"); 932 ret = -ENOMEM; 933 934 goto err_eventq; 935 } 936 937 return ret; 938 939 err_eventq: 940 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]); 941 942 err_tmreq_slab: 943 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); 944 945 err_max_req_slab: 946 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); 947 948 err_dflt_req_slab: 949 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 950 snic_trc_free(); 951 snic_debugfs_term(); 952 #endif 953 kfree(snic_glob); 954 snic_glob = NULL; 955 956 gdi_end: 957 return ret; 958 } /* end of snic_glob_init */ 959 960 /* 961 * snic_global_data_cleanup : Frees SNIC Global Data 962 */ 963 static void 964 snic_global_data_cleanup(void) 965 { 966 SNIC_BUG_ON(snic_glob == NULL); 967 968 destroy_workqueue(snic_glob->event_q); 969 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]); 970 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); 971 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); 972 973 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 974 /* Freeing Trace Resources */ 975 snic_trc_free(); 976 977 /* Freeing Debugfs Resources */ 978 snic_debugfs_term(); 979 #endif 980 kfree(snic_glob); 981 snic_glob = NULL; 982 } /* end of snic_glob_cleanup */ 983 984 static struct pci_driver snic_driver = { 985 .name = SNIC_DRV_NAME, 986 .id_table = snic_id_table, 987 .probe = snic_probe, 988 .remove = snic_remove, 989 }; 990 991 static int __init 992 snic_init_module(void) 993 { 994 int ret = 0; 995 996 #ifndef 
__x86_64__ 997 SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n"); 998 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); 999 #endif 1000 1001 SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION); 1002 1003 ret = snic_global_data_init(); 1004 if (ret) { 1005 SNIC_ERR("Failed to Initialize Global Data.\n"); 1006 1007 return ret; 1008 } 1009 1010 ret = pci_register_driver(&snic_driver); 1011 if (ret < 0) { 1012 SNIC_ERR("PCI driver register error\n"); 1013 1014 goto err_pci_reg; 1015 } 1016 1017 return ret; 1018 1019 err_pci_reg: 1020 snic_global_data_cleanup(); 1021 1022 return ret; 1023 } 1024 1025 static void __exit 1026 snic_cleanup_module(void) 1027 { 1028 pci_unregister_driver(&snic_driver); 1029 snic_global_data_cleanup(); 1030 } 1031 1032 module_init(snic_init_module); 1033 module_exit(snic_cleanup_module); 1034 1035 MODULE_LICENSE("GPL v2"); 1036 MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION); 1037 MODULE_VERSION(SNIC_DRV_VERSION); 1038 MODULE_DEVICE_TABLE(pci, snic_id_table); 1039 MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, " 1040 "Sesidhar Baddela <sebaddel@cisco.com>"); 1041