/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/string.h>
#include <linux/export.h>

#include "csio_init.h"
#include "csio_defs.h"

#define CSIO_MIN_MEMPOOL_SZ	64

static struct dentry *csio_debugfs_root;

static struct scsi_transport_template *csio_fcoe_transport;
static struct scsi_transport_template *csio_fcoe_transport_vport;

/*
 * debugfs support
 */
static ssize_t
csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	loff_t avail = file_inode(file)->i_size;
	unsigned int mem = (uintptr_t)file->private_data & 3;
	struct csio_hw *hw = file->private_data - mem;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;
	if (count > avail - pos)
		count = avail - pos;

	while (count) {
		size_t len;
		int ret, ofst;
		__be32 data[16];

		if (mem == MEM_MC)
			ret = hw->chip_ops->chip_mc_read(hw, 0, pos,
							 data, NULL);
		else
			ret = hw->chip_ops->chip_edc_read(hw, mem, pos,
							  data, NULL);
		if (ret)
			return ret;

		ofst = pos % sizeof(data);
		len = min(count, sizeof(data) - ofst);
		if (copy_to_user(buf, (u8 *)data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
	count = pos - *ppos;
	*ppos = pos;
	return count;
}

static const struct file_operations csio_mem_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = simple_open,
	.read    = csio_mem_read,
	.llseek  = default_llseek,
};

void csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
			  unsigned int idx, unsigned int size_mb)
{
	debugfs_create_file_size(name, S_IRUSR, hw->debugfs_root,
				 (void *)hw + idx, &csio_mem_debugfs_fops,
				 size_mb << 20);
}
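/*
 * Note on the debugfs private data above: csio_add_debugfs_mem() stores
 * the csio_hw pointer with the memory index added into its low bits,
 * which is why csio_mem_read() masks with 3.  A minimal sketch of the
 * round trip (assuming the MEM_* indices stay below 4):
 *
 *	void *data = (void *)hw + MEM_EDC1;	  // create: tag pointer
 *	unsigned int mem = (uintptr_t)data & 3;	  // read:   mem == MEM_EDC1
 *	struct csio_hw *hw = data - mem;	  // read:   pointer restored
 */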
static int csio_setup_debugfs(struct csio_hw *hw)
{
	int i;

	if (IS_ERR_OR_NULL(hw->debugfs_root))
		return -1;

	i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A);
	if (i & EDRAM0_ENABLE_F)
		csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
	if (i & EDRAM1_ENABLE_F)
		csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);

	hw->chip_ops->chip_dfs_create_ext_mem(hw);
	return 0;
}

/*
 * csio_dfs_create - Creates and sets up per-hw debugfs.
 */
static int
csio_dfs_create(struct csio_hw *hw)
{
	if (csio_debugfs_root) {
		hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev),
						      csio_debugfs_root);
		csio_setup_debugfs(hw);
	}

	return 0;
}

/*
 * csio_dfs_destroy - Destroys per-hw debugfs.
 */
static int
csio_dfs_destroy(struct csio_hw *hw)
{
	if (hw->debugfs_root)
		debugfs_remove_recursive(hw->debugfs_root);

	return 0;
}

/*
 * csio_dfs_init - Debug filesystem initialization for the module.
 */
static int
csio_dfs_init(void)
{
	csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!csio_debugfs_root)
		pr_warn("Could not create debugfs entry, continuing\n");

	return 0;
}

/*
 * csio_dfs_exit - debugfs cleanup for the module.
 */
static void
csio_dfs_exit(void)
{
	debugfs_remove(csio_debugfs_root);
}

/*
 * csio_pci_init - PCI initialization.
 * @pdev: PCI device.
 * @bars: Bitmask of bars to be requested.
 *
 * Initializes the PCI function by enabling MMIO, setting bus
 * mastership and setting DMA mask.
 */
static int
csio_pci_init(struct pci_dev *pdev, int *bars)
{
	int rv = -ENODEV;

	*bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_enable_device_mem(pdev))
		goto err;

	if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
		goto err_disable_device;

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* Try a 64-bit DMA mask first, then fall back to 32-bit. */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "No suitable DMA available.\n");
		goto err_release_regions;
	}

	return 0;

err_release_regions:
	pci_release_selected_regions(pdev, *bars);
err_disable_device:
	pci_disable_device(pdev);
err:
	return rv;
}

/*
 * csio_pci_exit - PCI uninitialization.
 * @pdev: PCI device.
 * @bars: Bars to be released.
 */
static void
csio_pci_exit(struct pci_dev *pdev, int *bars)
{
	pci_release_selected_regions(pdev, *bars);
	pci_disable_device(pdev);
}
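/*
 * The HW module funnels deferred event processing through a single work
 * item (hw->evtq_work), so the worker init/exit pair below is just
 * INIT_WORK()/cancel_work_sync(); the work runs off the shared system
 * workqueue rather than a dedicated thread.
 */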
/*
 * csio_hw_init_workers - Initialize the HW module's worker threads.
 * @hw: HW module.
 */
static void
csio_hw_init_workers(struct csio_hw *hw)
{
	INIT_WORK(&hw->evtq_work, csio_evtq_worker);
}

static void
csio_hw_exit_workers(struct csio_hw *hw)
{
	cancel_work_sync(&hw->evtq_work);
}

static int
csio_create_queues(struct csio_hw *hw)
{
	int i, j;
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	int rv;
	struct csio_scsi_cpu_info *info;

	if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
		return 0;

	if (hw->intr_mode != CSIO_IM_MSIX) {
		rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
				       0, hw->pport[0].portid, false, NULL);
		if (rv != 0) {
			csio_err(hw, "Forward Interrupt IQ failed!: %d\n", rv);
			return rv;
		}
	}

	/* FW event queue */
	rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
			       csio_get_fwevt_intr_idx(hw),
			       hw->pport[0].portid, true, NULL);
	if (rv != 0) {
		csio_err(hw, "FW event IQ config failed!: %d\n", rv);
		return rv;
	}

	/* Create mgmt queue */
	rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
			       mgmtm->iq_idx, hw->pport[0].portid, NULL);
	if (rv != 0) {
		csio_err(hw, "Mgmt EQ create failed!: %d\n", rv);
		goto err;
	}

	/* Create SCSI queues */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < info->max_cpus; j++) {
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];

			rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
					       sqset->intr_idx, i, false,
					       NULL);
			if (rv != 0) {
				csio_err(hw,
				    "SCSI module IQ config failed [%d][%d]:%d\n",
				    i, j, rv);
				goto err;
			}
			rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
					       sqset->iq_idx, i, NULL);
			if (rv != 0) {
				csio_err(hw,
				    "SCSI module EQ config failed [%d][%d]:%d\n",
				    i, j, rv);
				goto err;
			}
		} /* for all CPUs */
	} /* For all ports */

	hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
	return 0;
err:
	csio_wr_destroy_queues(hw, true);
	return -EINVAL;
}
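/*
 * Queue bring-up is deliberately two-phase: csio_config_queues() below
 * allocates host queue memory once and sets CSIO_HWF_Q_MEM_ALLOCED,
 * while csio_create_queues() above posts those queues to the firmware
 * and sets CSIO_HWF_Q_FW_ALLOCED.  After a firmware reset only the
 * second phase must be repeated, which is why csio_config_queues()
 * short-circuits straight to csio_create_queues() on re-entry.
 */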
/*
 * csio_config_queues - Configure the DMA queues.
 * @hw: HW module.
 *
 * Allocates memory for queues and registers them with FW.
 */
int
csio_config_queues(struct csio_hw *hw)
{
	int i, j, idx, k = 0;
	int rv;
	struct csio_scsi_qset *sqset;
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	struct csio_scsi_qset *orig;
	struct csio_scsi_cpu_info *info;

	if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED)
		return csio_create_queues(hw);

	/* Calculate number of SCSI queues for MSIX we would like */
	hw->num_scsi_msix_cpus = num_online_cpus();
	hw->num_sqsets = num_online_cpus() * hw->num_pports;

	if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) {
		hw->num_sqsets = CSIO_MAX_SCSI_QSETS;
		hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU;
	}

	/* Initialize max_cpus, may get reduced during msix allocations */
	for (i = 0; i < hw->num_pports; i++)
		hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus;

	csio_dbg(hw, "nsqsets:%d scpus:%d\n",
		 hw->num_sqsets, hw->num_scsi_msix_cpus);

	csio_intr_enable(hw);

	if (hw->intr_mode != CSIO_IM_MSIX) {

		/* Allocate Forward interrupt iq. */
		hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE,
						  CSIO_INTR_WRSIZE,
						  CSIO_INGRESS,
						  (void *)hw, 0, 0, NULL);
		if (hw->intr_iq_idx == -1) {
			csio_err(hw,
				 "Forward interrupt queue creation failed\n");
			goto intr_disable;
		}
	}

	/* Allocate the FW evt queue */
	hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE,
					   CSIO_FWEVT_WRSIZE,
					   CSIO_INGRESS, (void *)hw,
					   CSIO_FWEVT_FLBUFS, 0,
					   csio_fwevt_intx_handler);
	if (hw->fwevt_iq_idx == -1) {
		csio_err(hw, "FW evt queue creation failed\n");
		goto intr_disable;
	}

	/* Allocate the mgmt queue */
	mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE,
					CSIO_MGMT_EQ_WRSIZE,
					CSIO_EGRESS, (void *)hw, 0, 0, NULL);
	if (mgmtm->eq_idx == -1) {
		csio_err(hw, "Failed to alloc egress queue for mgmt module\n");
		goto intr_disable;
	}

	/* Use FW IQ for MGMT req completion */
	mgmtm->iq_idx = hw->fwevt_iq_idx;

	/* Allocate SCSI queues */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			sqset = &hw->sqset[i][j];

			if (j >= info->max_cpus) {
				k = j % info->max_cpus;
				orig = &hw->sqset[i][k];
				sqset->eq_idx = orig->eq_idx;
				sqset->iq_idx = orig->iq_idx;
				continue;
			}

			idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0,
					      CSIO_EGRESS, (void *)hw, 0, 0,
					      NULL);
			if (idx == -1) {
				csio_err(hw, "EQ creation failed for idx:%d\n",
					 idx);
				goto intr_disable;
			}

			sqset->eq_idx = idx;

			idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE,
					      CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS,
					      (void *)hw, 0, 0,
					      csio_scsi_intx_handler);
			if (idx == -1) {
				csio_err(hw, "IQ creation failed for idx:%d\n",
					 idx);
				goto intr_disable;
			}
			sqset->iq_idx = idx;
		} /* for all CPUs */
	} /* For all ports */

	hw->flags |= CSIO_HWF_Q_MEM_ALLOCED;

	rv = csio_create_queues(hw);
	if (rv != 0)
		goto intr_disable;

	/*
	 * Now request IRQs for the vectors. In the event of a failure,
	 * cleanup is handled internally by this function.
	 */
	rv = csio_request_irqs(hw);
	if (rv != 0)
		return -EINVAL;

	return 0;

intr_disable:
	csio_intr_disable(hw, false);

	return -EINVAL;
}
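/*
 * Worked example of the qset sharing in csio_config_queues() (numbers
 * are hypothetical): with 2 ports and 8 online CPUs, 16 qsets are laid
 * out.  If MSI-X vector shortage later trims a port's max_cpus to 4,
 * CPUs 4..7 of that port hit the j >= max_cpus branch and alias the
 * eq_idx/iq_idx of qset (j % 4), i.e. qsets 0..3, instead of getting
 * queues of their own.
 */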
static int
csio_resource_alloc(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	int rv = -ENOMEM;

	wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ +
		      CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ);

	hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
						     sizeof(struct csio_mb));
	if (!hw->mb_mempool)
		goto err;

	hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
						sizeof(struct csio_rnode));
	if (!hw->rnode_mempool)
		goto err_free_mb_mempool;

	hw->scsi_dma_pool = dma_pool_create("csio_scsi_dma_pool",
					    &hw->pdev->dev, CSIO_SCSI_RSP_LEN,
					    8, 0);
	if (!hw->scsi_dma_pool)
		goto err_free_rn_pool;

	return 0;

err_free_rn_pool:
	mempool_destroy(hw->rnode_mempool);
	hw->rnode_mempool = NULL;
err_free_mb_mempool:
	mempool_destroy(hw->mb_mempool);
	hw->mb_mempool = NULL;
err:
	return rv;
}

static void
csio_resource_free(struct csio_hw *hw)
{
	dma_pool_destroy(hw->scsi_dma_pool);
	hw->scsi_dma_pool = NULL;
	mempool_destroy(hw->rnode_mempool);
	hw->rnode_mempool = NULL;
	mempool_destroy(hw->mb_mempool);
	hw->mb_mempool = NULL;
}

/*
 * csio_hw_alloc - Allocate and initialize the HW module.
 * @pdev: PCI device.
 *
 * Allocates HW structure, DMA, memory resources, maps BARS to
 * host memory and initializes HW module.
 */
static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev)
{
	struct csio_hw *hw;

	hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL);
	if (!hw)
		goto err;

	hw->pdev = pdev;
	strncpy(hw->drv_version, CSIO_DRV_VERSION, 32);

	/* memory pool/DMA pool allocation */
	if (csio_resource_alloc(hw))
		goto err_free_hw;

	/* Get the start address of registers from BAR 0 */
	hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
				       pci_resource_len(pdev, 0));
	if (!hw->regstart) {
		csio_err(hw, "Could not map BAR 0, regstart = %p\n",
			 hw->regstart);
		goto err_resource_free;
	}

	csio_hw_init_workers(hw);

	if (csio_hw_init(hw))
		goto err_unmap_bar;

	csio_dfs_create(hw);

	csio_dbg(hw, "hw:%p\n", hw);

	return hw;

err_unmap_bar:
	csio_hw_exit_workers(hw);
	iounmap(hw->regstart);
err_resource_free:
	csio_resource_free(hw);
err_free_hw:
	kfree(hw);
err:
	return NULL;
}

/*
 * csio_hw_free - Uninitialize and free the HW module.
 * @hw: The HW module
 *
 * Disable interrupts, uninit the HW module, free resources, free hw.
 */
static void
csio_hw_free(struct csio_hw *hw)
{
	csio_intr_disable(hw, true);
	csio_hw_exit_workers(hw);
	csio_hw_exit(hw);
	iounmap(hw->regstart);
	csio_dfs_destroy(hw);
	csio_resource_free(hw);
	kfree(hw);
}
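/*
 * Lifetime note for the lnode helpers below: each csio_lnode is embedded
 * in its Scsi_Host allocation (scsi_host_alloc() + shost_priv()), so an
 * lnode lives exactly as long as its shost, and csio_shost_exit() drops
 * the final reference via scsi_host_put().
 */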
/**
 * csio_shost_init - Create and initialize the lnode module.
 * @hw: The HW module.
 * @dev: The device associated with this invocation.
 * @probe: Called from probe context or not?
 * @pln: Parent lnode if any.
 *
 * Allocates lnode structure via scsi_host_alloc, initializes
 * shost, initializes lnode module and registers with SCSI ML
 * via scsi_host_add. This function is shared between physical and
 * virtual node ports.
 */
struct csio_lnode *
csio_shost_init(struct csio_hw *hw, struct device *dev,
		bool probe, struct csio_lnode *pln)
{
	struct Scsi_Host *shost = NULL;
	struct csio_lnode *ln;

	csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth;
	csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth;

	/*
	 * hw->pdev is the physical port's PCI dev structure,
	 * which will be different from the NPIV dev structure.
	 */
	if (dev == &hw->pdev->dev)
		shost = scsi_host_alloc(
				&csio_fcoe_shost_template,
				sizeof(struct csio_lnode));
	else
		shost = scsi_host_alloc(
				&csio_fcoe_shost_vport_template,
				sizeof(struct csio_lnode));

	if (!shost)
		goto err;

	ln = shost_priv(shost);
	memset(ln, 0, sizeof(struct csio_lnode));

	/* Link common lnode to this lnode */
	ln->dev_num = (shost->host_no << 16);

	shost->can_queue = CSIO_MAX_QUEUE;
	shost->this_id = -1;
	shost->unique_id = shost->host_no;
	shost->max_cmd_len = 16; /* Max CDB length supported */
	shost->max_id = min_t(uint32_t, csio_fcoe_rnodes,
			      hw->fres_info.max_ssns);
	shost->max_lun = CSIO_MAX_LUN;
	if (dev == &hw->pdev->dev)
		shost->transportt = csio_fcoe_transport;
	else
		shost->transportt = csio_fcoe_transport_vport;

	/* root lnode */
	if (!hw->rln)
		hw->rln = ln;

	/* Other initialization here: Common, Transport specific */
	if (csio_lnode_init(ln, hw, pln))
		goto err_shost_put;

	if (scsi_add_host_with_dma(shost, dev, &hw->pdev->dev))
		goto err_lnode_exit;

	return ln;

err_lnode_exit:
	csio_lnode_exit(ln);
err_shost_put:
	scsi_host_put(shost);
err:
	return NULL;
}
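/*
 * Typical usage of csio_shost_init(): csio_probe_one() and
 * csio_pci_resume() pass the physical function's PCI device (selecting
 * csio_fcoe_transport), whereas the NPIV vport creation path passes the
 * vport's device and therefore gets csio_fcoe_transport_vport.
 */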
/**
 * csio_shost_exit - De-instantiate the shost.
 * @ln: The lnode module corresponding to the shost.
 */
void
csio_shost_exit(struct csio_lnode *ln)
{
	struct Scsi_Host *shost = csio_ln_to_shost(ln);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	/* Inform transport */
	fc_remove_host(shost);

	/* Inform SCSI ML */
	scsi_remove_host(shost);

	/* Flush all the events, so that any rnode removal events
	 * already queued are all handled, before we remove the lnode.
	 */
	spin_lock_irq(&hw->lock);
	csio_evtq_flush(hw);
	spin_unlock_irq(&hw->lock);

	csio_lnode_exit(ln);
	scsi_host_put(shost);
}

struct csio_lnode *
csio_lnode_alloc(struct csio_hw *hw)
{
	return csio_shost_init(hw, &hw->pdev->dev, false, NULL);
}

void
csio_lnodes_block_request(struct csio_hw *hw)
{
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct csio_lnode *ln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_block_requests(shost);
	}
	kfree(lnode_list);
}

void
csio_lnodes_unblock_request(struct csio_hw *hw)
{
	struct csio_lnode *ln;
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_unblock_requests(shost);
	}
	kfree(lnode_list);
}

void
csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid)
{
	struct csio_lnode *ln;
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		if (sln->portid != portid)
			continue;

		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_block_requests(shost);
	}
	kfree(lnode_list);
}
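/*
 * The block/unblock helpers above and below share a two-pass pattern:
 * snapshot the lnode pointers into a flat array under hw->lock, then
 * call into the SCSI midlayer outside the lock, keeping midlayer calls
 * out of the spinlock's critical section.
 */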
void
csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid)
{
	struct csio_lnode *ln;
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		if (sln->portid != portid)
			continue;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_unblock_requests(shost);
	}
	kfree(lnode_list);
}

void
csio_lnodes_exit(struct csio_hw *hw, bool npiv)
{
	struct csio_lnode *sln;
	struct csio_lnode *ln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
		return;
	}

	/* Get all child lnodes(NPIV ports) */
	spin_lock_irq(&hw->lock);
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	/* Delete NPIV lnodes */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		fc_vport_terminate(ln->fc_vport);
	}

	/* Delete only npiv lnodes */
	if (npiv)
		goto free_lnodes;

	cur_cnt = 0;
	/* Get all physical lnodes */
	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;
	}
	spin_unlock_irq(&hw->lock);

	/* Delete physical lnodes */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
		csio_shost_exit(lnode_list[ii]);
	}

free_lnodes:
	kfree(lnode_list);
}

/*
 * csio_lnode_init_post: Set lnode attributes after starting HW.
 * @ln: lnode.
 */
static void
csio_lnode_init_post(struct csio_lnode *ln)
{
	struct Scsi_Host *shost = csio_ln_to_shost(ln);

	csio_fchost_attr_init(ln);

	scsi_scan_host(shost);
}
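/*
 * csio_lnode_init_post() above is the final step of lnode bring-up:
 * the FC host attributes are published first so the targets discovered
 * by scsi_scan_host() show up under a fully described fc_host.
 */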
/*
 * csio_probe_one - Instantiate this function.
 * @pdev: PCI device
 * @id: Device ID
 *
 * This is the .probe() callback of the driver. This function:
 * - Initializes the PCI function by enabling MMIO, setting bus
 *   mastership and setting DMA mask.
 * - Allocates HW structure, DMA, memory resources, maps BARS to
 *   host memory and initializes HW module.
 * - Allocates lnode structure via scsi_host_alloc, initializes
 *   shost, initializes lnode module and registers with SCSI ML
 *   via scsi_host_add.
 * - Enables interrupts, and starts the chip by kicking off the
 *   HW state machine.
 * - Once hardware is ready, initiates a scan of the host via
 *   scsi_scan_host.
 */
static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rv;
	int bars;
	int i;
	struct csio_hw *hw;
	struct csio_lnode *ln;

	/* probe only T5 and T6 cards */
	if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)) &&
	    !csio_is_t6((pdev->device & CSIO_HW_CHIP_MASK)))
		return -ENODEV;

	rv = csio_pci_init(pdev, &bars);
	if (rv)
		goto err;

	hw = csio_hw_alloc(pdev);
	if (!hw) {
		rv = -ENODEV;
		goto err_pci_exit;
	}

	if (!pcie_relaxed_ordering_enabled(pdev))
		hw->flags |= CSIO_HWF_ROOT_NO_RELAXED_ORDERING;

	pci_set_drvdata(pdev, hw);

	rv = csio_hw_start(hw);
	if (rv) {
		if (rv == -EINVAL) {
			dev_err(&pdev->dev,
				"Failed to start FW, continuing in debug mode.\n");
			return 0;
		}
		goto err_lnode_exit;
	}

	sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
		FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
		FW_HDR_FW_VER_MINOR_G(hw->fwrev),
		FW_HDR_FW_VER_MICRO_G(hw->fwrev),
		FW_HDR_FW_VER_BUILD_G(hw->fwrev));

	for (i = 0; i < hw->num_pports; i++) {
		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
		if (!ln) {
			rv = -ENODEV;
			break;
		}
		/* Initialize portid */
		ln->portid = hw->pport[i].portid;

		spin_lock_irq(&hw->lock);
		if (csio_lnode_start(ln) != 0)
			rv = -ENODEV;
		spin_unlock_irq(&hw->lock);

		if (rv)
			break;

		csio_lnode_init_post(ln);
	}

	if (rv)
		goto err_lnode_exit;

	return 0;

err_lnode_exit:
	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
err_pci_exit:
	csio_pci_exit(pdev, &bars);
err:
	dev_err(&pdev->dev, "probe of device failed: %d\n", rv);
	return rv;
}

/*
 * csio_remove_one - Remove one instance of the driver at this PCI function.
 * @pdev: PCI device
 *
 * Used during hotplug operation.
 */
static void csio_remove_one(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);

	/* Stops lnode, Rnode s/m
	 * Quiesce IOs.
	 * All sessions with remote ports are unregistered.
	 */
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);

	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
	csio_pci_exit(pdev, &bars);
}
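/*
 * AER recovery drives the three handlers below in sequence:
 * error_detected quiesces I/O and offlines the device, slot_reset
 * re-enables the function and brings the HW state machine back to
 * ready, and resume recreates the lnodes and restarts I/O, mirroring
 * the tail end of csio_probe_one().
 */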
/*
 * csio_pci_error_detected - PCI error was detected
 * @pdev: PCI device
 */
static pci_ers_result_t
csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);

	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);

	/* Post PCI error detected evt to HW s/m
	 * HW s/m handles this evt by quiescing IOs, unregisters rports
	 * and finally takes the device to offline.
	 */
	csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	csio_lnodes_exit(hw, 0);
	csio_intr_disable(hw, true);
	pci_disable_device(pdev);
	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

/*
 * csio_pci_slot_reset - PCI slot has been reset.
 * @pdev: PCI device
 */
static pci_ers_result_t
csio_pci_slot_reset(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	int ready;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "cannot re-enable device in slot reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* Bring HW s/m to ready state.
	 * but don't resume IOs.
	 */
	spin_lock_irq(&hw->lock);
	csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET);
	ready = csio_is_hw_ready(hw);
	spin_unlock_irq(&hw->lock);

	if (ready) {
		return PCI_ERS_RESULT_RECOVERED;
	} else {
		dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
}

/*
 * csio_pci_resume - Resume normal operations
 * @pdev: PCI device
 */
static void
csio_pci_resume(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	struct csio_lnode *ln;
	int rv = 0;
	int i;

	/* Bring the LINK UP and Resume IO */

	for (i = 0; i < hw->num_pports; i++) {
		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
		if (!ln) {
			rv = -ENODEV;
			break;
		}
		/* Initialize portid */
		ln->portid = hw->pport[i].portid;

		spin_lock_irq(&hw->lock);
		if (csio_lnode_start(ln) != 0)
			rv = -ENODEV;
		spin_unlock_irq(&hw->lock);

		if (rv)
			break;

		csio_lnode_init_post(ln);
	}

	if (rv)
		goto err_resume_exit;

	return;

err_resume_exit:
	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
	dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
}

static struct pci_error_handlers csio_err_handler = {
	.error_detected	= csio_pci_error_detected,
	.slot_reset	= csio_pci_slot_reset,
	.resume		= csio_pci_resume,
};

/*
 *  Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id csio_pci_tbl[] = {
/* Define for FCoE uses PF6 */
#define CH_PCI_DEVICE_ID_FUNCTION	0x6

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }

#include "t4_pci_id_tbl.h"

static struct pci_driver csio_pci_driver = {
	.name		= KBUILD_MODNAME,
	.driver		= {
		.owner	= THIS_MODULE,
	},
	.id_table	= csio_pci_tbl,
	.probe		= csio_probe_one,
	.remove		= csio_remove_one,
	.err_handler	= &csio_err_handler,
};
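/*
 * The device ID table above is generated by t4_pci_id_tbl.h, a header
 * shared across the Chelsio T4/T5/T6 drivers; defining
 * CH_PCI_DEVICE_ID_FUNCTION as 0x6 restricts matches to the FCoE
 * physical function (PF6) of each listed device.
 */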
/*
 * csio_init - Chelsio storage driver initialization function.
 */
static int __init
csio_init(void)
{
	int rv = -ENOMEM;

	pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION);

	csio_dfs_init();

	csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
	if (!csio_fcoe_transport)
		goto err;

	csio_fcoe_transport_vport =
			fc_attach_transport(&csio_fc_transport_vport_funcs);
	if (!csio_fcoe_transport_vport)
		goto err_vport;

	rv = pci_register_driver(&csio_pci_driver);
	if (rv)
		goto err_pci;

	return 0;

err_pci:
	fc_release_transport(csio_fcoe_transport_vport);
err_vport:
	fc_release_transport(csio_fcoe_transport);
err:
	csio_dfs_exit();
	return rv;
}

/*
 * csio_exit - Chelsio storage driver uninitialization.
 *
 * Function that gets called in the unload path.
 */
static void __exit
csio_exit(void)
{
	pci_unregister_driver(&csio_pci_driver);
	csio_dfs_exit();
	fc_release_transport(csio_fcoe_transport_vport);
	fc_release_transport(csio_fcoe_transport);
}

module_init(csio_init);
module_exit(csio_exit);
MODULE_AUTHOR(CSIO_DRV_AUTHOR);
MODULE_DESCRIPTION(CSIO_DRV_DESC);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5);
MODULE_FIRMWARE(FW_FNAME_T6);