1 /* 2 * Copyright 2014 Cisco Systems, Inc. All rights reserved. 3 * 4 * This program is free software; you may redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; version 2 of the License. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 15 * SOFTWARE. 16 */ 17 18 #include <linux/module.h> 19 #include <linux/mempool.h> 20 #include <linux/string.h> 21 #include <linux/slab.h> 22 #include <linux/errno.h> 23 #include <linux/init.h> 24 #include <linux/pci.h> 25 #include <linux/skbuff.h> 26 #include <linux/interrupt.h> 27 #include <linux/spinlock.h> 28 #include <linux/workqueue.h> 29 #include <scsi/scsi_host.h> 30 #include <scsi/scsi_tcq.h> 31 32 #include "snic.h" 33 #include "snic_fwint.h" 34 35 #define PCI_DEVICE_ID_CISCO_SNIC 0x0046 36 37 /* Supported devices by snic module */ 38 static struct pci_device_id snic_id_table[] = { 39 {PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) }, 40 { 0, } /* end of table */ 41 }; 42 43 unsigned int snic_log_level = 0x0; 44 module_param(snic_log_level, int, S_IRUGO|S_IWUSR); 45 MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels"); 46 47 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 48 unsigned int snic_trace_max_pages = 16; 49 module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR); 50 MODULE_PARM_DESC(snic_trace_max_pages, 51 "Total allocated memory pages for snic trace buffer"); 52 53 #endif 54 unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH; 55 module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR); 56 MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN"); 57 58 /* 59 * snic_slave_alloc : callback function to SCSI Mid Layer, called on 60 * scsi device initialization. 61 */ 62 static int 63 snic_slave_alloc(struct scsi_device *sdev) 64 { 65 struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev)); 66 67 if (!tgt || snic_tgt_chkready(tgt)) 68 return -ENXIO; 69 70 return 0; 71 } 72 73 /* 74 * snic_slave_configure : callback function to SCSI Mid Layer, called on 75 * scsi device initialization. 76 */ 77 static int 78 snic_slave_configure(struct scsi_device *sdev) 79 { 80 struct snic *snic = shost_priv(sdev->host); 81 u32 qdepth = 0, max_ios = 0; 82 int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ; 83 84 /* Set Queue Depth */ 85 max_ios = snic_max_qdepth; 86 qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH); 87 scsi_change_queue_depth(sdev, qdepth); 88 89 if (snic->fwinfo.io_tmo > 1) 90 tmo = snic->fwinfo.io_tmo * HZ; 91 92 /* FW requires extended timeouts */ 93 blk_queue_rq_timeout(sdev->request_queue, tmo); 94 95 return 0; 96 } 97 98 static int 99 snic_change_queue_depth(struct scsi_device *sdev, int qdepth) 100 { 101 struct snic *snic = shost_priv(sdev->host); 102 int qsz = 0; 103 104 qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH); 105 if (qsz < sdev->queue_depth) 106 atomic64_inc(&snic->s_stats.misc.qsz_rampdown); 107 else if (qsz > sdev->queue_depth) 108 atomic64_inc(&snic->s_stats.misc.qsz_rampup); 109 110 atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth); 111 112 scsi_change_queue_depth(sdev, qsz); 113 114 return sdev->queue_depth; 115 } 116 117 static struct scsi_host_template snic_host_template = { 118 .module = THIS_MODULE, 119 .name = SNIC_DRV_NAME, 120 .queuecommand = snic_queuecommand, 121 .eh_abort_handler = snic_abort_cmd, 122 .eh_device_reset_handler = snic_device_reset, 123 .eh_host_reset_handler = snic_host_reset, 124 .slave_alloc = snic_slave_alloc, 125 .slave_configure = snic_slave_configure, 126 .change_queue_depth = snic_change_queue_depth, 127 .this_id = -1, 128 .cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH, 129 .can_queue = SNIC_MAX_IO_REQ, 130 .use_clustering = ENABLE_CLUSTERING, 131 .sg_tablesize = SNIC_MAX_SG_DESC_CNT, 132 .max_sectors = 0x800, 133 .shost_attrs = snic_attrs, 134 .track_queue_depth = 1, 135 .cmd_size = sizeof(struct snic_internal_io_state), 136 .proc_name = "snic_scsi", 137 }; 138 139 /* 140 * snic_handle_link_event : Handles link events such as link up/down/error 141 */ 142 void 143 snic_handle_link_event(struct snic *snic) 144 { 145 unsigned long flags; 146 147 spin_lock_irqsave(&snic->snic_lock, flags); 148 if (snic->stop_link_events) { 149 spin_unlock_irqrestore(&snic->snic_lock, flags); 150 151 return; 152 } 153 spin_unlock_irqrestore(&snic->snic_lock, flags); 154 155 queue_work(snic_glob->event_q, &snic->link_work); 156 } /* end of snic_handle_link_event */ 157 158 /* 159 * snic_notify_set : sets notification area 160 * This notification area is to receive events from fw 161 * Note: snic supports only MSIX interrupts, in which we can just call 162 * svnic_dev_notify_set directly 163 */ 164 static int 165 snic_notify_set(struct snic *snic) 166 { 167 int ret = 0; 168 enum vnic_dev_intr_mode intr_mode; 169 170 intr_mode = svnic_dev_get_intr_mode(snic->vdev); 171 172 if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) { 173 ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY); 174 } else { 175 SNIC_HOST_ERR(snic->shost, 176 "Interrupt mode should be setup before devcmd notify set %d\n", 177 intr_mode); 178 ret = -1; 179 } 180 181 return ret; 182 } /* end of snic_notify_set */ 183 184 /* 185 * snic_dev_wait : polls vnic open status. 186 */ 187 static int 188 snic_dev_wait(struct vnic_dev *vdev, 189 int (*start)(struct vnic_dev *, int), 190 int (*finished)(struct vnic_dev *, int *), 191 int arg) 192 { 193 unsigned long time; 194 int ret, done; 195 int retry_cnt = 0; 196 197 ret = start(vdev, arg); 198 if (ret) 199 return ret; 200 201 /* 202 * Wait for func to complete...2 seconds max. 203 * 204 * Sometimes schedule_timeout_uninterruptible take long time 205 * to wakeup, which results skipping retry. The retry counter 206 * ensures to retry at least two times. 207 */ 208 time = jiffies + (HZ * 2); 209 do { 210 ret = finished(vdev, &done); 211 if (ret) 212 return ret; 213 214 if (done) 215 return 0; 216 schedule_timeout_uninterruptible(HZ/10); 217 ++retry_cnt; 218 } while (time_after(time, jiffies) || (retry_cnt < 3)); 219 220 return -ETIMEDOUT; 221 } /* end of snic_dev_wait */ 222 223 /* 224 * snic_cleanup: called by snic_remove 225 * Stops the snic device, masks all interrupts, Completed CQ entries are 226 * drained. Posted WQ/RQ/Copy-WQ entries are cleanup 227 */ 228 static int 229 snic_cleanup(struct snic *snic) 230 { 231 unsigned int i; 232 int ret; 233 234 svnic_dev_disable(snic->vdev); 235 for (i = 0; i < snic->intr_count; i++) 236 svnic_intr_mask(&snic->intr[i]); 237 238 for (i = 0; i < snic->wq_count; i++) { 239 ret = svnic_wq_disable(&snic->wq[i]); 240 if (ret) 241 return ret; 242 } 243 244 /* Clean up completed IOs */ 245 snic_fwcq_cmpl_handler(snic, -1); 246 247 snic_wq_cmpl_handler(snic, -1); 248 249 /* Clean up the IOs that have not completed */ 250 for (i = 0; i < snic->wq_count; i++) 251 svnic_wq_clean(&snic->wq[i], snic_free_wq_buf); 252 253 for (i = 0; i < snic->cq_count; i++) 254 svnic_cq_clean(&snic->cq[i]); 255 256 for (i = 0; i < snic->intr_count; i++) 257 svnic_intr_clean(&snic->intr[i]); 258 259 /* Cleanup snic specific requests */ 260 snic_free_all_untagged_reqs(snic); 261 262 /* Cleanup Pending SCSI commands */ 263 snic_shutdown_scsi_cleanup(snic); 264 265 for (i = 0; i < SNIC_REQ_MAX_CACHES; i++) 266 mempool_destroy(snic->req_pool[i]); 267 268 return 0; 269 } /* end of snic_cleanup */ 270 271 272 static void 273 snic_iounmap(struct snic *snic) 274 { 275 if (snic->bar0.vaddr) 276 iounmap(snic->bar0.vaddr); 277 } 278 279 /* 280 * snic_vdev_open_done : polls for svnic_dev_open cmd completion. 281 */ 282 static int 283 snic_vdev_open_done(struct vnic_dev *vdev, int *done) 284 { 285 struct snic *snic = svnic_dev_priv(vdev); 286 int ret; 287 int nretries = 5; 288 289 do { 290 ret = svnic_dev_open_done(vdev, done); 291 if (ret == 0) 292 break; 293 294 SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n"); 295 } while (nretries--); 296 297 return ret; 298 } /* end of snic_vdev_open_done */ 299 300 /* 301 * snic_add_host : registers scsi host with ML 302 */ 303 static int 304 snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev) 305 { 306 int ret = 0; 307 308 ret = scsi_add_host(shost, &pdev->dev); 309 if (ret) { 310 SNIC_HOST_ERR(shost, 311 "snic: scsi_add_host failed. %d\n", 312 ret); 313 314 return ret; 315 } 316 317 SNIC_BUG_ON(shost->work_q != NULL); 318 snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d", 319 shost->host_no); 320 shost->work_q = create_singlethread_workqueue(shost->work_q_name); 321 if (!shost->work_q) { 322 SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n"); 323 324 ret = -ENOMEM; 325 } 326 327 return ret; 328 } /* end of snic_add_host */ 329 330 static void 331 snic_del_host(struct Scsi_Host *shost) 332 { 333 if (!shost->work_q) 334 return; 335 336 destroy_workqueue(shost->work_q); 337 shost->work_q = NULL; 338 scsi_remove_host(shost); 339 } 340 341 int 342 snic_get_state(struct snic *snic) 343 { 344 return atomic_read(&snic->state); 345 } 346 347 void 348 snic_set_state(struct snic *snic, enum snic_state state) 349 { 350 SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n", 351 snic_state_to_str(snic_get_state(snic)), 352 snic_state_to_str(state)); 353 354 atomic_set(&snic->state, state); 355 } 356 357 /* 358 * snic_probe : Initialize the snic interface. 359 */ 360 static int 361 snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 362 { 363 struct Scsi_Host *shost; 364 struct snic *snic; 365 mempool_t *pool; 366 unsigned long flags; 367 u32 max_ios = 0; 368 int ret, i; 369 370 /* Device Information */ 371 SNIC_INFO("snic device %4x:%4x:%4x:%4x: ", 372 pdev->vendor, pdev->device, pdev->subsystem_vendor, 373 pdev->subsystem_device); 374 375 SNIC_INFO("snic device bus %x: slot %x: fn %x\n", 376 pdev->bus->number, PCI_SLOT(pdev->devfn), 377 PCI_FUNC(pdev->devfn)); 378 379 /* 380 * Allocate SCSI Host and setup association between host, and snic 381 */ 382 shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic)); 383 if (!shost) { 384 SNIC_ERR("Unable to alloc scsi_host\n"); 385 ret = -ENOMEM; 386 387 goto prob_end; 388 } 389 snic = shost_priv(shost); 390 snic->shost = shost; 391 392 snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME, 393 shost->host_no); 394 395 SNIC_HOST_INFO(shost, 396 "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n", 397 shost->host_no, snic, shost, pdev->bus->number, 398 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 399 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 400 /* Per snic debugfs init */ 401 ret = snic_stats_debugfs_init(snic); 402 if (ret) { 403 SNIC_HOST_ERR(snic->shost, 404 "Failed to initialize debugfs stats\n"); 405 snic_stats_debugfs_remove(snic); 406 } 407 #endif 408 409 /* Setup PCI Resources */ 410 pci_set_drvdata(pdev, snic); 411 snic->pdev = pdev; 412 413 ret = pci_enable_device(pdev); 414 if (ret) { 415 SNIC_HOST_ERR(shost, 416 "Cannot enable PCI Resources, aborting : %d\n", 417 ret); 418 419 goto err_free_snic; 420 } 421 422 ret = pci_request_regions(pdev, SNIC_DRV_NAME); 423 if (ret) { 424 SNIC_HOST_ERR(shost, 425 "Cannot obtain PCI Resources, aborting : %d\n", 426 ret); 427 428 goto err_pci_disable; 429 } 430 431 pci_set_master(pdev); 432 433 /* 434 * Query PCI Controller on system for DMA addressing 435 * limitation for the device. Try 43-bit first, and 436 * fail to 32-bit. 437 */ 438 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43)); 439 if (ret) { 440 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 441 if (ret) { 442 SNIC_HOST_ERR(shost, 443 "No Usable DMA Configuration, aborting %d\n", 444 ret); 445 goto err_rel_regions; 446 } 447 } 448 449 /* Map vNIC resources from BAR0 */ 450 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 451 SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n"); 452 453 ret = -ENODEV; 454 goto err_rel_regions; 455 } 456 457 snic->bar0.vaddr = pci_iomap(pdev, 0, 0); 458 if (!snic->bar0.vaddr) { 459 SNIC_HOST_ERR(shost, 460 "Cannot memory map BAR0 res hdr aborting.\n"); 461 462 ret = -ENODEV; 463 goto err_rel_regions; 464 } 465 466 snic->bar0.bus_addr = pci_resource_start(pdev, 0); 467 snic->bar0.len = pci_resource_len(pdev, 0); 468 SNIC_BUG_ON(snic->bar0.bus_addr == 0); 469 470 /* Devcmd2 Resource Allocation and Initialization */ 471 snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1); 472 if (!snic->vdev) { 473 SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n"); 474 475 ret = -ENODEV; 476 goto err_iounmap; 477 } 478 479 ret = svnic_dev_cmd_init(snic->vdev, 0); 480 if (ret) { 481 SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret); 482 483 goto err_vnic_unreg; 484 } 485 486 ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0); 487 if (ret) { 488 SNIC_HOST_ERR(shost, 489 "vNIC dev open failed, aborting. %d\n", 490 ret); 491 492 goto err_vnic_unreg; 493 } 494 495 ret = svnic_dev_init(snic->vdev, 0); 496 if (ret) { 497 SNIC_HOST_ERR(shost, 498 "vNIC dev init failed. aborting. %d\n", 499 ret); 500 501 goto err_dev_close; 502 } 503 504 /* Get vNIC information */ 505 ret = snic_get_vnic_config(snic); 506 if (ret) { 507 SNIC_HOST_ERR(shost, 508 "Get vNIC configuration failed, aborting. %d\n", 509 ret); 510 511 goto err_dev_close; 512 } 513 514 /* Configure Maximum Outstanding IO reqs */ 515 max_ios = snic->config.io_throttle_count; 516 if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD) 517 shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ, 518 max_t(u32, SNIC_MIN_IO_REQ, max_ios)); 519 520 snic->max_tag_id = shost->can_queue; 521 522 shost->max_lun = snic->config.luns_per_tgt; 523 shost->max_id = SNIC_MAX_TARGET; 524 525 shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/ 526 527 snic_get_res_counts(snic); 528 529 /* 530 * Assumption: Only MSIx is supported 531 */ 532 ret = snic_set_intr_mode(snic); 533 if (ret) { 534 SNIC_HOST_ERR(shost, 535 "Failed to set intr mode aborting. %d\n", 536 ret); 537 538 goto err_dev_close; 539 } 540 541 ret = snic_alloc_vnic_res(snic); 542 if (ret) { 543 SNIC_HOST_ERR(shost, 544 "Failed to alloc vNIC resources aborting. %d\n", 545 ret); 546 547 goto err_clear_intr; 548 } 549 550 /* Initialize specific lists */ 551 INIT_LIST_HEAD(&snic->list); 552 553 /* 554 * spl_cmd_list for maintaining snic specific cmds 555 * such as EXCH_VER_REQ, REPORT_TARGETS etc 556 */ 557 INIT_LIST_HEAD(&snic->spl_cmd_list); 558 spin_lock_init(&snic->spl_cmd_lock); 559 560 /* initialize all snic locks */ 561 spin_lock_init(&snic->snic_lock); 562 563 for (i = 0; i < SNIC_WQ_MAX; i++) 564 spin_lock_init(&snic->wq_lock[i]); 565 566 for (i = 0; i < SNIC_IO_LOCKS; i++) 567 spin_lock_init(&snic->io_req_lock[i]); 568 569 pool = mempool_create_slab_pool(2, 570 snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); 571 if (!pool) { 572 SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n"); 573 574 ret = -ENOMEM; 575 goto err_free_res; 576 } 577 578 snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool; 579 580 pool = mempool_create_slab_pool(2, 581 snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); 582 if (!pool) { 583 SNIC_HOST_ERR(shost, "max sgl pool creation failed\n"); 584 585 ret = -ENOMEM; 586 goto err_free_dflt_sgl_pool; 587 } 588 589 snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool; 590 591 pool = mempool_create_slab_pool(2, 592 snic_glob->req_cache[SNIC_REQ_TM_CACHE]); 593 if (!pool) { 594 SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n"); 595 596 ret = -ENOMEM; 597 goto err_free_max_sgl_pool; 598 } 599 600 snic->req_pool[SNIC_REQ_TM_CACHE] = pool; 601 602 /* Initialize snic state */ 603 atomic_set(&snic->state, SNIC_INIT); 604 605 atomic_set(&snic->ios_inflight, 0); 606 607 /* Setup notification buffer area */ 608 ret = snic_notify_set(snic); 609 if (ret) { 610 SNIC_HOST_ERR(shost, 611 "Failed to alloc notify buffer aborting. %d\n", 612 ret); 613 614 goto err_free_tmreq_pool; 615 } 616 617 spin_lock_irqsave(&snic_glob->snic_list_lock, flags); 618 list_add_tail(&snic->list, &snic_glob->snic_list); 619 spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags); 620 621 snic_disc_init(&snic->disc); 622 INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc); 623 INIT_WORK(&snic->disc_work, snic_handle_disc); 624 INIT_WORK(&snic->link_work, snic_handle_link); 625 626 /* Enable all queues */ 627 for (i = 0; i < snic->wq_count; i++) 628 svnic_wq_enable(&snic->wq[i]); 629 630 ret = svnic_dev_enable_wait(snic->vdev); 631 if (ret) { 632 SNIC_HOST_ERR(shost, 633 "vNIC dev enable failed w/ error %d\n", 634 ret); 635 636 goto err_vdev_enable; 637 } 638 639 ret = snic_request_intr(snic); 640 if (ret) { 641 SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret); 642 643 goto err_req_intr; 644 } 645 646 for (i = 0; i < snic->intr_count; i++) 647 svnic_intr_unmask(&snic->intr[i]); 648 649 /* Get snic params */ 650 ret = snic_get_conf(snic); 651 if (ret) { 652 SNIC_HOST_ERR(shost, 653 "Failed to get snic io config from FW w err %d\n", 654 ret); 655 656 goto err_get_conf; 657 } 658 659 /* 660 * Initialization done with PCI system, hardware, firmware. 661 * Add shost to SCSI 662 */ 663 ret = snic_add_host(shost, pdev); 664 if (ret) { 665 SNIC_HOST_ERR(shost, 666 "Adding scsi host Failed ... exiting. %d\n", 667 ret); 668 669 goto err_get_conf; 670 } 671 672 snic_set_state(snic, SNIC_ONLINE); 673 674 ret = snic_disc_start(snic); 675 if (ret) { 676 SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n", 677 ret); 678 679 goto err_get_conf; 680 } 681 682 SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n"); 683 684 return 0; 685 686 err_get_conf: 687 snic_free_all_untagged_reqs(snic); 688 689 for (i = 0; i < snic->intr_count; i++) 690 svnic_intr_mask(&snic->intr[i]); 691 692 snic_free_intr(snic); 693 694 err_req_intr: 695 svnic_dev_disable(snic->vdev); 696 697 err_vdev_enable: 698 svnic_dev_notify_unset(snic->vdev); 699 700 for (i = 0; i < snic->wq_count; i++) { 701 int rc = 0; 702 703 rc = svnic_wq_disable(&snic->wq[i]); 704 if (rc) { 705 SNIC_HOST_ERR(shost, 706 "WQ Disable Failed w/ err = %d\n", rc); 707 708 break; 709 } 710 } 711 snic_del_host(snic->shost); 712 713 err_free_tmreq_pool: 714 mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]); 715 716 err_free_max_sgl_pool: 717 mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]); 718 719 err_free_dflt_sgl_pool: 720 mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]); 721 722 err_free_res: 723 snic_free_vnic_res(snic); 724 725 err_clear_intr: 726 snic_clear_intr_mode(snic); 727 728 err_dev_close: 729 svnic_dev_close(snic->vdev); 730 731 err_vnic_unreg: 732 svnic_dev_unregister(snic->vdev); 733 734 err_iounmap: 735 snic_iounmap(snic); 736 737 err_rel_regions: 738 pci_release_regions(pdev); 739 740 err_pci_disable: 741 pci_disable_device(pdev); 742 743 err_free_snic: 744 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 745 snic_stats_debugfs_remove(snic); 746 #endif 747 scsi_host_put(shost); 748 pci_set_drvdata(pdev, NULL); 749 750 prob_end: 751 SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n", 752 pdev->bus->number, PCI_SLOT(pdev->devfn), 753 PCI_FUNC(pdev->devfn)); 754 755 return ret; 756 } /* end of snic_probe */ 757 758 759 /* 760 * snic_remove : invoked on unbinding the interface to cleanup the 761 * resources allocated in snic_probe on initialization. 762 */ 763 static void 764 snic_remove(struct pci_dev *pdev) 765 { 766 struct snic *snic = pci_get_drvdata(pdev); 767 unsigned long flags; 768 769 if (!snic) { 770 SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n", 771 pdev->bus->number, PCI_SLOT(pdev->devfn), 772 PCI_FUNC(pdev->devfn)); 773 774 return; 775 } 776 777 /* 778 * Mark state so that the workqueue thread stops forwarding 779 * received frames and link events. ISR and other threads 780 * that can queue work items will also stop creating work 781 * items on the snic workqueue 782 */ 783 snic_set_state(snic, SNIC_OFFLINE); 784 spin_lock_irqsave(&snic->snic_lock, flags); 785 snic->stop_link_events = 1; 786 spin_unlock_irqrestore(&snic->snic_lock, flags); 787 788 flush_workqueue(snic_glob->event_q); 789 snic_disc_term(snic); 790 791 spin_lock_irqsave(&snic->snic_lock, flags); 792 snic->in_remove = 1; 793 spin_unlock_irqrestore(&snic->snic_lock, flags); 794 795 /* 796 * This stops the snic device, masks all interrupts, Completed 797 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are 798 * cleanup 799 */ 800 snic_cleanup(snic); 801 802 spin_lock_irqsave(&snic_glob->snic_list_lock, flags); 803 list_del(&snic->list); 804 spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags); 805 806 snic_tgt_del_all(snic); 807 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 808 snic_stats_debugfs_remove(snic); 809 #endif 810 snic_del_host(snic->shost); 811 812 svnic_dev_notify_unset(snic->vdev); 813 snic_free_intr(snic); 814 snic_free_vnic_res(snic); 815 snic_clear_intr_mode(snic); 816 svnic_dev_close(snic->vdev); 817 svnic_dev_unregister(snic->vdev); 818 snic_iounmap(snic); 819 pci_release_regions(pdev); 820 pci_disable_device(pdev); 821 pci_set_drvdata(pdev, NULL); 822 823 /* this frees Scsi_Host and snic memory (continuous chunk) */ 824 scsi_host_put(snic->shost); 825 } /* end of snic_remove */ 826 827 828 struct snic_global *snic_glob; 829 830 /* 831 * snic_global_data_init: Initialize SNIC Global Data 832 * Notes: All the global lists, variables should be part of global data 833 * this helps in debugging. 834 */ 835 static int 836 snic_global_data_init(void) 837 { 838 int ret = 0; 839 struct kmem_cache *cachep; 840 ssize_t len = 0; 841 842 snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL); 843 844 if (!snic_glob) { 845 SNIC_ERR("Failed to allocate Global Context.\n"); 846 847 ret = -ENOMEM; 848 goto gdi_end; 849 } 850 851 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 852 /* Debugfs related Initialization */ 853 /* Create debugfs entries for snic */ 854 ret = snic_debugfs_init(); 855 if (ret < 0) { 856 SNIC_ERR("Failed to create sysfs dir for tracing and stats.\n"); 857 snic_debugfs_term(); 858 /* continue even if it fails */ 859 } 860 861 /* Trace related Initialization */ 862 /* Allocate memory for trace buffer */ 863 ret = snic_trc_init(); 864 if (ret < 0) { 865 SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n"); 866 snic_trc_free(); 867 /* continue even if it fails */ 868 } 869 870 #endif 871 INIT_LIST_HEAD(&snic_glob->snic_list); 872 spin_lock_init(&snic_glob->snic_list_lock); 873 874 /* Create a cache for allocation of snic_host_req+default size ESGLs */ 875 len = sizeof(struct snic_req_info); 876 len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl); 877 cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN, 878 SLAB_HWCACHE_ALIGN, NULL); 879 if (!cachep) { 880 SNIC_ERR("Failed to create snic default sgl slab\n"); 881 ret = -ENOMEM; 882 883 goto err_dflt_req_slab; 884 } 885 snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep; 886 887 /* Create a cache for allocation of max size Extended SGLs */ 888 len = sizeof(struct snic_req_info); 889 len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl); 890 cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN, 891 SLAB_HWCACHE_ALIGN, NULL); 892 if (!cachep) { 893 SNIC_ERR("Failed to create snic max sgl slab\n"); 894 ret = -ENOMEM; 895 896 goto err_max_req_slab; 897 } 898 snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep; 899 900 len = sizeof(struct snic_host_req); 901 cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN, 902 SLAB_HWCACHE_ALIGN, NULL); 903 if (!cachep) { 904 SNIC_ERR("Failed to create snic tm req slab\n"); 905 ret = -ENOMEM; 906 907 goto err_tmreq_slab; 908 } 909 snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep; 910 911 /* snic_event queue */ 912 snic_glob->event_q = create_singlethread_workqueue("snic_event_wq"); 913 if (!snic_glob->event_q) { 914 SNIC_ERR("snic event queue create failed\n"); 915 ret = -ENOMEM; 916 917 goto err_eventq; 918 } 919 920 return ret; 921 922 err_eventq: 923 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]); 924 925 err_tmreq_slab: 926 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); 927 928 err_max_req_slab: 929 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); 930 931 err_dflt_req_slab: 932 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 933 snic_trc_free(); 934 snic_debugfs_term(); 935 #endif 936 kfree(snic_glob); 937 snic_glob = NULL; 938 939 gdi_end: 940 return ret; 941 } /* end of snic_glob_init */ 942 943 /* 944 * snic_global_data_cleanup : Frees SNIC Global Data 945 */ 946 static void 947 snic_global_data_cleanup(void) 948 { 949 SNIC_BUG_ON(snic_glob == NULL); 950 951 destroy_workqueue(snic_glob->event_q); 952 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]); 953 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); 954 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); 955 956 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS 957 /* Freeing Trace Resources */ 958 snic_trc_free(); 959 960 /* Freeing Debugfs Resources */ 961 snic_debugfs_term(); 962 #endif 963 kfree(snic_glob); 964 snic_glob = NULL; 965 } /* end of snic_glob_cleanup */ 966 967 static struct pci_driver snic_driver = { 968 .name = SNIC_DRV_NAME, 969 .id_table = snic_id_table, 970 .probe = snic_probe, 971 .remove = snic_remove, 972 }; 973 974 static int __init 975 snic_init_module(void) 976 { 977 int ret = 0; 978 979 #ifndef __x86_64__ 980 SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n"); 981 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); 982 #endif 983 984 SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION); 985 986 ret = snic_global_data_init(); 987 if (ret) { 988 SNIC_ERR("Failed to Initialize Global Data.\n"); 989 990 return ret; 991 } 992 993 ret = pci_register_driver(&snic_driver); 994 if (ret < 0) { 995 SNIC_ERR("PCI driver register error\n"); 996 997 goto err_pci_reg; 998 } 999 1000 return ret; 1001 1002 err_pci_reg: 1003 snic_global_data_cleanup(); 1004 1005 return ret; 1006 } 1007 1008 static void __exit 1009 snic_cleanup_module(void) 1010 { 1011 pci_unregister_driver(&snic_driver); 1012 snic_global_data_cleanup(); 1013 } 1014 1015 module_init(snic_init_module); 1016 module_exit(snic_cleanup_module); 1017 1018 MODULE_LICENSE("GPL v2"); 1019 MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION); 1020 MODULE_VERSION(SNIC_DRV_VERSION); 1021 MODULE_DEVICE_TABLE(pci, snic_id_table); 1022 MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, " 1023 "Sesidhar Baddela <sebaddel@cisco.com>"); 1024